1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 #include "dc_trace.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
59 
60 #define DC_LOGGER_INIT(logger)
61 
62 #define CTX \
63 	hws->ctx
64 #define REG(reg)\
65 	hws->regs->reg
66 
67 #undef FN
68 #define FN(reg_name, field_name) \
69 	hws->shifts->field_name, hws->masks->field_name
70 
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 	print_microsec(dc_ctx, log_ctx, ref_cycle)
74 
75 #define GAMMA_HW_POINTS_NUM 256
76 
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79 
80 void print_microsec(struct dc_context *dc_ctx,
81 	struct dc_log_buffer_ctx *log_ctx,
82 	uint32_t ref_cycle)
83 {
84 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 	static const unsigned int frac = 1000;
86 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87 
88 	DTN_INFO("  %11d.%03d",
89 			us_x10 / frac,
90 			us_x10 % frac);
91 }
92 
93 void dcn10_lock_all_pipes(struct dc *dc,
94 	struct dc_state *context,
95 	bool lock)
96 {
97 	struct pipe_ctx *pipe_ctx;
98 	struct timing_generator *tg;
99 	int i;
100 
101 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
102 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
103 		tg = pipe_ctx->stream_res.tg;
104 
105 		/*
106 		 * Only lock the top pipe's tg to prevent redundant
107 		 * (un)locking. Also skip if pipe is disabled.
108 		 */
109 		if (pipe_ctx->top_pipe ||
110 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
111 		    !tg->funcs->is_tg_enabled(tg))
112 			continue;
113 
114 		if (lock)
115 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
116 		else
117 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
118 	}
119 }
120 
/*
 * Dump the MPC and DPP CRC result registers into the debug log, when
 * those registers exist on this ASIC.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	/* REG() evaluates to 0 when the register is absent on this ASIC. */
	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
134 
135 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
136 {
137 	struct dc_context *dc_ctx = dc->ctx;
138 	struct dcn_hubbub_wm wm;
139 	int i;
140 
141 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
142 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
143 
144 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
145 			"         sr_enter          sr_exit  dram_clk_change\n");
146 
147 	for (i = 0; i < 4; i++) {
148 		struct dcn_hubbub_wm_set *s;
149 
150 		s = &wm.sets[i];
151 		DTN_INFO("WM_Set[%d]:", s->wm_set);
152 		DTN_INFO_MICRO_SEC(s->data_urgent);
153 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
154 		DTN_INFO_MICRO_SEC(s->sr_enter);
155 		DTN_INFO_MICRO_SEC(s->sr_exit);
156 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
157 		DTN_INFO("\n");
158 	}
159 
160 	DTN_INFO("\n");
161 }
162 
/*
 * Dump per-HUBP hardware state into the debug log: surface
 * configuration first, then the RQ, DLG and TTU register sections.
 * Pipes whose HUBP is blanked are skipped in every section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Latch current register values into the cached state. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			/* Watermarks are reference-clock cycles; print as us. */
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request queue (RQ) register section. */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Display latency guarantee (DLG) register section. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* Time-to-underflow (TTU) register section. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
276 
/*
 * Top-level DTN hardware-state dump. Logs HUBBUB watermarks, HUBP
 * state, then walks DPPs, MPCCs, OTGs, DSCs, stream/link encoders,
 * calculated clocks, MPC CRCs and (when present) DP HPO encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		/* LUT mode fields are decoded to human-readable names. */
		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf marks an unassigned MPCC; skip those. */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* Not all encoder types implement the read hook. */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		int hpo_dp_link_enc_count = 0;

		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		for (i = 0; i < dc->link_count; i++)
			if (dc->links[i]->hpo_dp_link_enc)
				hpo_dp_link_enc_count++;

		if (hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");

			for (i = 0; i < dc->link_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
536 
537 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
538 {
539 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
540 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
541 
542 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
543 		tg->funcs->clear_optc_underflow(tg);
544 		return true;
545 	}
546 
547 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
548 		hubp->funcs->hubp_clear_underflow(hubp);
549 		return true;
550 	}
551 	return false;
552 }
553 
554 void dcn10_enable_power_gating_plane(
555 	struct dce_hwseq *hws,
556 	bool enable)
557 {
558 	bool force_on = true; /* disable power gating */
559 
560 	if (enable)
561 		force_on = false;
562 
563 	/* DCHUBP0/1/2/3 */
564 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
566 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
567 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
568 
569 	/* DPP0/1/2/3 */
570 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
572 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
573 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
574 }
575 
576 void dcn10_disable_vga(
577 	struct dce_hwseq *hws)
578 {
579 	unsigned int in_vga1_mode = 0;
580 	unsigned int in_vga2_mode = 0;
581 	unsigned int in_vga3_mode = 0;
582 	unsigned int in_vga4_mode = 0;
583 
584 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
585 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
586 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
587 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
588 
589 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
590 			in_vga3_mode == 0 && in_vga4_mode == 0)
591 		return;
592 
593 	REG_WRITE(D1VGA_CONTROL, 0);
594 	REG_WRITE(D2VGA_CONTROL, 0);
595 	REG_WRITE(D3VGA_CONTROL, 0);
596 	REG_WRITE(D4VGA_CONTROL, 0);
597 
598 	/* HW Engineer's Notes:
599 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
600 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
601 	 *
602 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
603 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
604 	 */
605 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
606 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
607 }
608 
609 /**
610  * dcn10_dpp_pg_control - DPP power gate control.
611  *
612  * @hws: dce_hwseq reference.
613  * @dpp_inst: DPP instance reference.
614  * @power_on: true if we want to enable power gate, false otherwise.
615  *
616  * Enable or disable power gate in the specific DPP instance.
617  */
618 void dcn10_dpp_pg_control(
619 		struct dce_hwseq *hws,
620 		unsigned int dpp_inst,
621 		bool power_on)
622 {
623 	uint32_t power_gate = power_on ? 0 : 1;
624 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
625 
626 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
627 		return;
628 	if (REG(DOMAIN1_PG_CONFIG) == 0)
629 		return;
630 
631 	switch (dpp_inst) {
632 	case 0: /* DPP0 */
633 		REG_UPDATE(DOMAIN1_PG_CONFIG,
634 				DOMAIN1_POWER_GATE, power_gate);
635 
636 		REG_WAIT(DOMAIN1_PG_STATUS,
637 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
638 				1, 1000);
639 		break;
640 	case 1: /* DPP1 */
641 		REG_UPDATE(DOMAIN3_PG_CONFIG,
642 				DOMAIN3_POWER_GATE, power_gate);
643 
644 		REG_WAIT(DOMAIN3_PG_STATUS,
645 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
646 				1, 1000);
647 		break;
648 	case 2: /* DPP2 */
649 		REG_UPDATE(DOMAIN5_PG_CONFIG,
650 				DOMAIN5_POWER_GATE, power_gate);
651 
652 		REG_WAIT(DOMAIN5_PG_STATUS,
653 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
654 				1, 1000);
655 		break;
656 	case 3: /* DPP3 */
657 		REG_UPDATE(DOMAIN7_PG_CONFIG,
658 				DOMAIN7_POWER_GATE, power_gate);
659 
660 		REG_WAIT(DOMAIN7_PG_STATUS,
661 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
662 				1, 1000);
663 		break;
664 	default:
665 		BREAK_TO_DEBUGGER();
666 		break;
667 	}
668 }
669 
670 /**
671  * dcn10_hubp_pg_control - HUBP power gate control.
672  *
673  * @hws: dce_hwseq reference.
674  * @hubp_inst: DPP instance reference.
675  * @power_on: true if we want to enable power gate, false otherwise.
676  *
677  * Enable or disable power gate in the specific HUBP instance.
678  */
679 void dcn10_hubp_pg_control(
680 		struct dce_hwseq *hws,
681 		unsigned int hubp_inst,
682 		bool power_on)
683 {
684 	uint32_t power_gate = power_on ? 0 : 1;
685 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
686 
687 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
688 		return;
689 	if (REG(DOMAIN0_PG_CONFIG) == 0)
690 		return;
691 
692 	switch (hubp_inst) {
693 	case 0: /* DCHUBP0 */
694 		REG_UPDATE(DOMAIN0_PG_CONFIG,
695 				DOMAIN0_POWER_GATE, power_gate);
696 
697 		REG_WAIT(DOMAIN0_PG_STATUS,
698 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
699 				1, 1000);
700 		break;
701 	case 1: /* DCHUBP1 */
702 		REG_UPDATE(DOMAIN2_PG_CONFIG,
703 				DOMAIN2_POWER_GATE, power_gate);
704 
705 		REG_WAIT(DOMAIN2_PG_STATUS,
706 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
707 				1, 1000);
708 		break;
709 	case 2: /* DCHUBP2 */
710 		REG_UPDATE(DOMAIN4_PG_CONFIG,
711 				DOMAIN4_POWER_GATE, power_gate);
712 
713 		REG_WAIT(DOMAIN4_PG_STATUS,
714 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
715 				1, 1000);
716 		break;
717 	case 3: /* DCHUBP3 */
718 		REG_UPDATE(DOMAIN6_PG_CONFIG,
719 				DOMAIN6_POWER_GATE, power_gate);
720 
721 		REG_WAIT(DOMAIN6_PG_STATUS,
722 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
723 				1, 1000);
724 		break;
725 	default:
726 		BREAK_TO_DEBUGGER();
727 		break;
728 	}
729 }
730 
731 static void power_on_plane(
732 	struct dce_hwseq *hws,
733 	int plane_id)
734 {
735 	DC_LOGGER_INIT(hws->ctx->logger);
736 	if (REG(DC_IP_REQUEST_CNTL)) {
737 		REG_SET(DC_IP_REQUEST_CNTL, 0,
738 				IP_REQUEST_EN, 1);
739 
740 		if (hws->funcs.dpp_pg_control)
741 			hws->funcs.dpp_pg_control(hws, plane_id, true);
742 
743 		if (hws->funcs.hubp_pg_control)
744 			hws->funcs.hubp_pg_control(hws, plane_id, true);
745 
746 		REG_SET(DC_IP_REQUEST_CNTL, 0,
747 				IP_REQUEST_EN, 0);
748 		DC_LOG_DEBUG(
749 				"Un-gated front end for pipe %d\n", plane_id);
750 	}
751 }
752 
/*
 * Undo the DEGVIDCN10_253 workaround: blank HUBP0 again and re-gate its
 * power domain, which apply_DEGVIDCN10_253_wa() had forced on to keep
 * stutter working while all pipes were power gated.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	/* Blank first, then gate: the order matters for this WA. */
	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
772 
/*
 * DEGVIDCN10_253 workaround: once every HUBP is power gated, un-gate
 * HUBP0 and unblank it so stutter can still be entered. Reversed by
 * undo_DEGVIDCN10_253_wa() when a pipe comes back.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* WA only applies when every HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
802 
803 void dcn10_bios_golden_init(struct dc *dc)
804 {
805 	struct dce_hwseq *hws = dc->hwseq;
806 	struct dc_bios *bp = dc->ctx->dc_bios;
807 	int i;
808 	bool allow_self_fresh_force_enable = true;
809 
810 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
811 		return;
812 
813 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
814 		allow_self_fresh_force_enable =
815 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
816 
817 
818 	/* WA for making DF sleep when idle after resume from S0i3.
819 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
820 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
821 	 * before calling command table and it changed to 1 after,
822 	 * it should be set back to 0.
823 	 */
824 
825 	/* initialize dcn global */
826 	bp->funcs->enable_disp_power_gating(bp,
827 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
828 
829 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
830 		/* initialize dcn per pipe */
831 		bp->funcs->enable_disp_power_gating(bp,
832 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
833 	}
834 
835 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
836 		if (allow_self_fresh_force_enable == false &&
837 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
838 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
839 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
840 
841 }
842 
843 static void false_optc_underflow_wa(
844 		struct dc *dc,
845 		const struct dc_stream_state *stream,
846 		struct timing_generator *tg)
847 {
848 	int i;
849 	bool underflow;
850 
851 	if (!dc->hwseq->wa.false_optc_underflow)
852 		return;
853 
854 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
855 
856 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
857 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
858 
859 		if (old_pipe_ctx->stream != stream)
860 			continue;
861 
862 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
863 	}
864 
865 	if (tg->funcs->set_blank_data_double_buffer)
866 		tg->funcs->set_blank_data_double_buffer(tg, true);
867 
868 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
869 		tg->funcs->clear_optc_underflow(tg);
870 }
871 
/*
 * dcn10_enable_stream_timing - Program pixel clock and OTG timing, then
 * enable the CRTC for a stream's master pipe.
 *
 * @pipe_ctx: pipe to program; only the top (master) pipe does any work.
 * @context:  state being applied.
 * @dc:       dc instance, used for the black-color lookup.
 *
 * Child (bottom) pipes share the master pipe's back end and return DC_OK
 * without programming anything.
 *
 * Return: DC_OK on success; DC_ERROR_UNEXPECTED if pixel clock programming
 * or CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* pixel clock must be up before the OTG timing is programmed */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* program OTG timing from the stream timing plus the DLG
	 * global-sync parameters (vready/vstartup/vupdate)
	 */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* if the OTG is currently unblanked, blank it, wait for completion,
	 * and apply the false-underflow workaround before enabling the CRTC
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
964 
/*
 * dcn10_reset_back_end_for_pipe - Tear down a pipe's back end (stream side).
 *
 * Disables the stream (or, if DPMS is already off, just its audio),
 * releases a dynamically arbitrated audio endpoint, and - for the master
 * (top) pipe only - disables the CRTC, its OTG clock and any DRR setting.
 * On exit the pipe_ctx is detached from its stream.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* no stream encoder means the back end was never brought up */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* clear any DRR (variable refresh) configuration */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	/* only detach the stream if this pipe_ctx belongs to current_state */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1034 
1035 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1036 {
1037 	struct hubp *hubp ;
1038 	unsigned int i;
1039 	bool need_recover = true;
1040 
1041 	if (!dc->debug.recovery_enabled)
1042 		return false;
1043 
1044 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1045 		struct pipe_ctx *pipe_ctx =
1046 			&dc->current_state->res_ctx.pipe_ctx[i];
1047 		if (pipe_ctx != NULL) {
1048 			hubp = pipe_ctx->plane_res.hubp;
1049 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1050 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1051 					/* one pipe underflow, we will reset all the pipes*/
1052 					need_recover = true;
1053 				}
1054 			}
1055 		}
1056 	}
1057 	if (!need_recover)
1058 		return false;
1059 	/*
1060 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1061 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1062 	DCHUBP_CNTL:HUBP_DISABLE=1
1063 	DCHUBP_CNTL:HUBP_DISABLE=0
1064 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1065 	DCSURF_PRIMARY_SURFACE_ADDRESS
1066 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1067 	*/
1068 
1069 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1070 		struct pipe_ctx *pipe_ctx =
1071 			&dc->current_state->res_ctx.pipe_ctx[i];
1072 		if (pipe_ctx != NULL) {
1073 			hubp = pipe_ctx->plane_res.hubp;
1074 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1075 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1076 				hubp->funcs->set_hubp_blank_en(hubp, true);
1077 		}
1078 	}
1079 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1080 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1081 
1082 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1083 		struct pipe_ctx *pipe_ctx =
1084 			&dc->current_state->res_ctx.pipe_ctx[i];
1085 		if (pipe_ctx != NULL) {
1086 			hubp = pipe_ctx->plane_res.hubp;
1087 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1088 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1089 				hubp->funcs->hubp_disable_control(hubp, true);
1090 		}
1091 	}
1092 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1093 		struct pipe_ctx *pipe_ctx =
1094 			&dc->current_state->res_ctx.pipe_ctx[i];
1095 		if (pipe_ctx != NULL) {
1096 			hubp = pipe_ctx->plane_res.hubp;
1097 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1098 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1099 				hubp->funcs->hubp_disable_control(hubp, true);
1100 		}
1101 	}
1102 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1103 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1104 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1105 		struct pipe_ctx *pipe_ctx =
1106 			&dc->current_state->res_ctx.pipe_ctx[i];
1107 		if (pipe_ctx != NULL) {
1108 			hubp = pipe_ctx->plane_res.hubp;
1109 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1110 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1111 				hubp->funcs->set_hubp_blank_en(hubp, true);
1112 		}
1113 	}
1114 	return true;
1115 
1116 }
1117 
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state change; attempt forced recovery when it does not.
 *
 * Called from sanity-check paths. On failure it optionally dumps HW state,
 * traces the pipes, breaks to the debugger, then runs the force-recovery
 * workaround and re-verifies.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* record pipe state for post-mortem analysis */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
		/*check again*/
			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1137 
1138 /* trigger HW to start disconnect plane from stream on the next vsync */
1139 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1140 {
1141 	struct dce_hwseq *hws = dc->hwseq;
1142 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1143 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1144 	struct mpc *mpc = dc->res_pool->mpc;
1145 	struct mpc_tree *mpc_tree_params;
1146 	struct mpcc *mpcc_to_remove = NULL;
1147 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1148 
1149 	mpc_tree_params = &(opp->mpc_tree_params);
1150 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1151 
1152 	/*Already reset*/
1153 	if (mpcc_to_remove == NULL)
1154 		return;
1155 
1156 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1157 	if (opp != NULL)
1158 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1159 
1160 	dc->optimized_required = true;
1161 
1162 	if (hubp->funcs->hubp_disconnect)
1163 		hubp->funcs->hubp_disconnect(hubp);
1164 
1165 	if (dc->debug.sanity_checks)
1166 		hws->funcs.verify_allow_pstate_change_high(dc);
1167 }
1168 
1169 /**
1170  * dcn10_plane_atomic_power_down - Power down plane components.
1171  *
1172  * @dc: dc struct reference. used for grab hwseq.
1173  * @dpp: dpp struct reference.
1174  * @hubp: hubp struct reference.
1175  *
1176  * Keep in mind that this operation requires a power gate configuration;
1177  * however, requests for switch power gate are precisely controlled to avoid
1178  * problems. For this reason, power gate request is usually disabled. This
1179  * function first needs to enable the power gate request before disabling DPP
1180  * and HUBP. Finally, it disables the power gate request again.
1181  */
1182 void dcn10_plane_atomic_power_down(struct dc *dc,
1183 		struct dpp *dpp,
1184 		struct hubp *hubp)
1185 {
1186 	struct dce_hwseq *hws = dc->hwseq;
1187 	DC_LOGGER_INIT(dc->ctx->logger);
1188 
1189 	if (REG(DC_IP_REQUEST_CNTL)) {
1190 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1191 				IP_REQUEST_EN, 1);
1192 
1193 		if (hws->funcs.dpp_pg_control)
1194 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1195 
1196 		if (hws->funcs.hubp_pg_control)
1197 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1198 
1199 		dpp->funcs->dpp_reset(dpp);
1200 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1201 				IP_REQUEST_EN, 0);
1202 		DC_LOG_DEBUG(
1203 				"Power gated front end %d\n", hubp->inst);
1204 	}
1205 }
1206 
1207 /* disable HW used by plane.
1208  * note:  cannot disable until disconnect is complete
1209  */
1210 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1211 {
1212 	struct dce_hwseq *hws = dc->hwseq;
1213 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1214 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1215 	int opp_id = hubp->opp_id;
1216 
1217 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1218 
1219 	hubp->funcs->hubp_clk_cntl(hubp, false);
1220 
1221 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1222 
1223 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1224 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1225 				pipe_ctx->stream_res.opp,
1226 				false);
1227 
1228 	hubp->power_gated = true;
1229 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1230 
1231 	hws->funcs.plane_atomic_power_down(dc,
1232 			pipe_ctx->plane_res.dpp,
1233 			pipe_ctx->plane_res.hubp);
1234 
1235 	pipe_ctx->stream = NULL;
1236 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1237 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1238 	pipe_ctx->top_pipe = NULL;
1239 	pipe_ctx->bottom_pipe = NULL;
1240 	pipe_ctx->plane_state = NULL;
1241 }
1242 
1243 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1244 {
1245 	struct dce_hwseq *hws = dc->hwseq;
1246 	DC_LOGGER_INIT(dc->ctx->logger);
1247 
1248 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1249 		return;
1250 
1251 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1252 
1253 	apply_DEGVIDCN10_253_wa(dc);
1254 
1255 	DC_LOG_DC("Power down front end %d\n",
1256 					pipe_ctx->pipe_idx);
1257 }
1258 
/*
 * dcn10_init_pipes - Put every pipe into a known disabled/default state.
 *
 * Blanks each enabled OTG, resets the per-OPP MPC mux, then disconnects
 * and disables each pipe's front end. Pipes carrying a stream that
 * requested seamless boot are left running untouched.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	/* seamless boot applies when any stream requests the optimization */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* build a minimal pipe_ctx mapping so disconnect/disable
		 * below have valid resources to operate on
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		/* drop the temporary mapping again */
		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);
	}
}
1366 
/*
 * dcn10_init_hw - One-time hardware initialization for the display core.
 *
 * Brings up clocks and DCCG, applies BIOS golden settings, initializes
 * link encoders, power gates unused DSCs, optionally powers down DP
 * displays lit by VBIOS, initializes the pipes when taking over in
 * accelerated mode, and finally initializes audio, ABM and DMCU.
 * On FPGA/diag environments only a minimal subset runs.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i, j;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		/* FPGA/diag path: minimal bring-up, then return early */
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* derive DCCG/DCHUB reference clocks from VBIOS firmware info */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	/* Power gate DSCs */
	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
			if (hws->funcs.dsc_pg_control != NULL)
				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
	}

	/* Enable outbox notification feature of dmub */
	if (dc->debug.enable_dmub_aux_for_legacy_ddc)
		dmub_enable_outbox_notification(dc);

	/* we want to turn off all dp displays before doing detection */
	if (dc->config.power_down_display_on_boot) {
		uint8_t dpcd_power_state = '\0';
		enum dc_status status = DC_ERROR_UNEXPECTED;

		for (i = 0; i < dc->link_count; i++) {
			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
				continue;

			/* DP 2.0 requires that LTTPR Caps be read first */
			dp_retrieve_lttpr_cap(dc->links[i]);

			/*
			 * If any of the displays are lit up turn them off.
			 * The reason is that some MST hubs cannot be turned off
			 * completely until we tell them to do so.
			 * If not turned off, then displays connected to MST hub
			 * won't light up.
			 */
			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));
			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
				/* blank dp stream before power off receiver*/
				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);

					/* find the stream encoder driving this DIG front end */
					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
						if (fe == dc->res_pool->stream_enc[j]->id) {
							dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
										dc->res_pool->stream_enc[j]);
							break;
						}
					}
				}
				dp_receiver_power_ctrl(dc->links[i], false);
			}
		}
	}

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* panel_cntl hw_init reports the current backlight level,
			 * which seeds ABM init below
			 */
			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1567 
1568 /* In headless boot cases, DIG may be turned
1569  * on which causes HW/SW discrepancies.
1570  * To avoid this, power down hardware on boot
1571  * if DIG is turned on
1572  */
1573 void dcn10_power_down_on_boot(struct dc *dc)
1574 {
1575 	struct dc_link *edp_links[MAX_NUM_EDP];
1576 	struct dc_link *edp_link = NULL;
1577 	int edp_num;
1578 	int i = 0;
1579 
1580 	get_edp_links(dc, edp_links, &edp_num);
1581 	if (edp_num)
1582 		edp_link = edp_links[0];
1583 
1584 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1585 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1586 			dc->hwseq->funcs.edp_backlight_control &&
1587 			dc->hwss.power_down &&
1588 			dc->hwss.edp_power_control) {
1589 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1590 		dc->hwss.power_down(dc);
1591 		dc->hwss.edp_power_control(edp_link, false);
1592 	} else {
1593 		for (i = 0; i < dc->link_count; i++) {
1594 			struct dc_link *link = dc->links[i];
1595 
1596 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1597 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1598 					dc->hwss.power_down) {
1599 				dc->hwss.power_down(dc);
1600 				break;
1601 			}
1602 
1603 		}
1604 	}
1605 
1606 	/*
1607 	 * Call update_clocks with empty context
1608 	 * to send DISPLAY_OFF
1609 	 * Otherwise DISPLAY_OFF may not be asserted
1610 	 */
1611 	if (dc->clk_mgr->funcs->set_low_power_state)
1612 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1613 }
1614 
/*
 * dcn10_reset_hw_ctx_wrap - Reset back ends that are removed, or must be
 * fully reprogrammed, in the transition to @context.
 *
 * Pipes are walked in reverse so that secondary (bottom) pipes are visited
 * before their master; only master pipes (top_pipe == NULL) are reset,
 * since children share the master's back end.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe)
			continue;

		/* reset when the stream went away or needs reprogramming */
		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			/* power down the clock source no longer in use */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1646 
1647 static bool patch_address_for_sbs_tb_stereo(
1648 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1649 {
1650 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1651 	bool sec_split = pipe_ctx->top_pipe &&
1652 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1653 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1654 		(pipe_ctx->stream->timing.timing_3d_format ==
1655 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1656 		 pipe_ctx->stream->timing.timing_3d_format ==
1657 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1658 		*addr = plane_state->address.grph_stereo.left_addr;
1659 		plane_state->address.grph_stereo.left_addr =
1660 		plane_state->address.grph_stereo.right_addr;
1661 		return true;
1662 	} else {
1663 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1664 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1665 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1666 			plane_state->address.grph_stereo.right_addr =
1667 			plane_state->address.grph_stereo.left_addr;
1668 			plane_state->address.grph_stereo.right_meta_addr =
1669 			plane_state->address.grph_stereo.left_meta_addr;
1670 		}
1671 	}
1672 	return false;
1673 }
1674 
1675 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1676 {
1677 	bool addr_patched = false;
1678 	PHYSICAL_ADDRESS_LOC addr;
1679 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1680 
1681 	if (plane_state == NULL)
1682 		return;
1683 
1684 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1685 
1686 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1687 			pipe_ctx->plane_res.hubp,
1688 			&plane_state->address,
1689 			plane_state->flip_immediate);
1690 
1691 	plane_state->status.requested_address = plane_state->address;
1692 
1693 	if (plane_state->flip_immediate)
1694 		plane_state->status.current_address = plane_state->address;
1695 
1696 	if (addr_patched)
1697 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1698 }
1699 
1700 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1701 			const struct dc_plane_state *plane_state)
1702 {
1703 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1704 	const struct dc_transfer_func *tf = NULL;
1705 	bool result = true;
1706 
1707 	if (dpp_base == NULL)
1708 		return false;
1709 
1710 	if (plane_state->in_transfer_func)
1711 		tf = plane_state->in_transfer_func;
1712 
1713 	if (plane_state->gamma_correction &&
1714 		!dpp_base->ctx->dc->debug.always_use_regamma
1715 		&& !plane_state->gamma_correction->is_identity
1716 			&& dce_use_lut(plane_state->format))
1717 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1718 
1719 	if (tf == NULL)
1720 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1721 	else if (tf->type == TF_TYPE_PREDEFINED) {
1722 		switch (tf->tf) {
1723 		case TRANSFER_FUNCTION_SRGB:
1724 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1725 			break;
1726 		case TRANSFER_FUNCTION_BT709:
1727 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1728 			break;
1729 		case TRANSFER_FUNCTION_LINEAR:
1730 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1731 			break;
1732 		case TRANSFER_FUNCTION_PQ:
1733 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1734 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1735 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1736 			result = true;
1737 			break;
1738 		default:
1739 			result = false;
1740 			break;
1741 		}
1742 	} else if (tf->type == TF_TYPE_BYPASS) {
1743 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1744 	} else {
1745 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1746 					&dpp_base->degamma_params);
1747 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1748 				&dpp_base->degamma_params);
1749 		result = true;
1750 	}
1751 
1752 	return result;
1753 }
1754 
#define MAX_NUM_HW_POINTS 0x200

/*
 * log_tf - Dump a transfer function's point table to the gamma logs.
 *
 * @ctx: dc context providing the logger.
 * @tf: transfer function whose tf_pts table is logged.
 * @hw_points_num: number of points actually programmed to hardware.
 */
static void log_tf(struct dc_context *ctx,
				struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* points programmed to hardware */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	/* remaining points beyond what HW uses (verbose logs only) */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
1782 
1783 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1784 				const struct dc_stream_state *stream)
1785 {
1786 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1787 
1788 	if (dpp == NULL)
1789 		return false;
1790 
1791 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1792 
1793 	if (stream->out_transfer_func &&
1794 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1795 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1796 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1797 
1798 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1799 	 * update.
1800 	 */
1801 	else if (cm_helper_translate_curve_to_hw_format(
1802 			stream->out_transfer_func,
1803 			&dpp->regamma_params, false)) {
1804 		dpp->funcs->dpp_program_regamma_pwl(
1805 				dpp,
1806 				&dpp->regamma_params, OPP_REGAMMA_USER);
1807 	} else
1808 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1809 
1810 	if (stream != NULL && stream->ctx != NULL &&
1811 			stream->out_transfer_func != NULL) {
1812 		log_tf(stream->ctx,
1813 				stream->out_transfer_func,
1814 				dpp->regamma_params.hw_points_num);
1815 	}
1816 
1817 	return true;
1818 }
1819 
1820 void dcn10_pipe_control_lock(
1821 	struct dc *dc,
1822 	struct pipe_ctx *pipe,
1823 	bool lock)
1824 {
1825 	struct dce_hwseq *hws = dc->hwseq;
1826 
1827 	/* use TG master update lock to lock everything on the TG
1828 	 * therefore only top pipe need to lock
1829 	 */
1830 	if (!pipe || pipe->top_pipe)
1831 		return;
1832 
1833 	if (dc->debug.sanity_checks)
1834 		hws->funcs.verify_allow_pstate_change_high(dc);
1835 
1836 	if (lock)
1837 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1838 	else
1839 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1840 
1841 	if (dc->debug.sanity_checks)
1842 		hws->funcs.verify_allow_pstate_change_high(dc);
1843 }
1844 
1845 /**
1846  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1847  *
1848  * Software keepout workaround to prevent cursor update locking from stalling
1849  * out cursor updates indefinitely or from old values from being retained in
1850  * the case where the viewport changes in the same frame as the cursor.
1851  *
1852  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1853  * too close to VUPDATE, then stall out until VUPDATE finishes.
1854  *
1855  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1856  *       to avoid the need for this workaround.
1857  */
1858 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1859 {
1860 	struct dc_stream_state *stream = pipe_ctx->stream;
1861 	struct crtc_position position;
1862 	uint32_t vupdate_start, vupdate_end;
1863 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1864 	unsigned int us_per_line, us_vupdate;
1865 
1866 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1867 		return;
1868 
1869 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1870 		return;
1871 
1872 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1873 				       &vupdate_end);
1874 
1875 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1876 	vpos = position.vertical_count;
1877 
1878 	/* Avoid wraparound calculation issues */
1879 	vupdate_start += stream->timing.v_total;
1880 	vupdate_end += stream->timing.v_total;
1881 	vpos += stream->timing.v_total;
1882 
1883 	if (vpos <= vupdate_start) {
1884 		/* VPOS is in VACTIVE or back porch. */
1885 		lines_to_vupdate = vupdate_start - vpos;
1886 	} else if (vpos > vupdate_end) {
1887 		/* VPOS is in the front porch. */
1888 		return;
1889 	} else {
1890 		/* VPOS is in VUPDATE. */
1891 		lines_to_vupdate = 0;
1892 	}
1893 
1894 	/* Calculate time until VUPDATE in microseconds. */
1895 	us_per_line =
1896 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1897 	us_to_vupdate = lines_to_vupdate * us_per_line;
1898 
1899 	/* 70 us is a conservative estimate of cursor update time*/
1900 	if (us_to_vupdate > 70)
1901 		return;
1902 
1903 	/* Stall out until the cursor update completes. */
1904 	if (vupdate_end < vupdate_start)
1905 		vupdate_end += stream->timing.v_total;
1906 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1907 	udelay(us_to_vupdate + us_vupdate);
1908 }
1909 
1910 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1911 {
1912 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1913 	if (!pipe || pipe->top_pipe)
1914 		return;
1915 
1916 	/* Prevent cursor lock from stalling out cursor updates. */
1917 	if (lock)
1918 		delay_cursor_until_vupdate(dc, pipe);
1919 
1920 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1921 		union dmub_hw_lock_flags hw_locks = { 0 };
1922 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1923 
1924 		hw_locks.bits.lock_cursor = 1;
1925 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1926 
1927 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1928 					lock,
1929 					&hw_locks,
1930 					&inst_flags);
1931 	} else
1932 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1933 				pipe->stream_res.opp->inst, lock);
1934 }
1935 
1936 static bool wait_for_reset_trigger_to_occur(
1937 	struct dc_context *dc_ctx,
1938 	struct timing_generator *tg)
1939 {
1940 	bool rc = false;
1941 
1942 	/* To avoid endless loop we wait at most
1943 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1944 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1945 	int i;
1946 
1947 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1948 
1949 		if (!tg->funcs->is_counter_moving(tg)) {
1950 			DC_ERROR("TG counter is not moving!\n");
1951 			break;
1952 		}
1953 
1954 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1955 			rc = true;
1956 			/* usually occurs at i=1 */
1957 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1958 					i);
1959 			break;
1960 		}
1961 
1962 		/* Wait for one frame. */
1963 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1964 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1965 	}
1966 
1967 	if (false == rc)
1968 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1969 
1970 	return rc;
1971 }
1972 
1973 uint64_t reduceSizeAndFraction(
1974 	uint64_t *numerator,
1975 	uint64_t *denominator,
1976 	bool checkUint32Bounary)
1977 {
1978 	int i;
1979 	bool ret = checkUint32Bounary == false;
1980 	uint64_t max_int32 = 0xffffffff;
1981 	uint64_t num, denom;
1982 	static const uint16_t prime_numbers[] = {
1983 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1984 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1985 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1986 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1987 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1988 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1989 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1990 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1991 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1992 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1993 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1994 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1995 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1996 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1997 		941, 947, 953, 967, 971, 977, 983, 991, 997};
1998 	int count = ARRAY_SIZE(prime_numbers);
1999 
2000 	num = *numerator;
2001 	denom = *denominator;
2002 	for (i = 0; i < count; i++) {
2003 		uint32_t num_remainder, denom_remainder;
2004 		uint64_t num_result, denom_result;
2005 		if (checkUint32Bounary &&
2006 			num <= max_int32 && denom <= max_int32) {
2007 			ret = true;
2008 			break;
2009 		}
2010 		do {
2011 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2012 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2013 			if (num_remainder == 0 && denom_remainder == 0) {
2014 				num = num_result;
2015 				denom = denom_result;
2016 			}
2017 		} while (num_remainder == 0 && denom_remainder == 0);
2018 	}
2019 	*numerator = num;
2020 	*denominator = denom;
2021 	return ret;
2022 }
2023 
2024 bool is_low_refresh_rate(struct pipe_ctx *pipe)
2025 {
2026 	uint32_t master_pipe_refresh_rate =
2027 		pipe->stream->timing.pix_clk_100hz * 100 /
2028 		pipe->stream->timing.h_total /
2029 		pipe->stream->timing.v_total;
2030 	return master_pipe_refresh_rate <= 30;
2031 }
2032 
2033 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
2034 {
2035 	uint32_t clock_divider = 1;
2036 	uint32_t numpipes = 1;
2037 
2038 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2039 		clock_divider *= 2;
2040 
2041 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2042 		clock_divider *= 2;
2043 
2044 	while (pipe->next_odm_pipe) {
2045 		pipe = pipe->next_odm_pipe;
2046 		numpipes++;
2047 	}
2048 	clock_divider *= numpipes;
2049 
2050 	return clock_divider;
2051 }
2052 
/*
 * dcn10_align_pixel_clocks - Retune DP DTO phase/modulo of grouped pipes
 * against the embedded panel's timing so their pixel clocks can be vblank
 * aligned.
 *
 * Returns the index of the chosen master pipe (the embedded pipe when one is
 * present, otherwise the first synchronizable pipe), or -1 when nothing was
 * programmed (no DTO params configured or override unsupported).
 */
int dcn10_align_pixel_clocks(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	/* NOTE(review): clamshell_closed is decoded below but never read in
	 * this function — confirm whether it is still needed. */
	bool clamshell_closed = false;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	/* vblank_alignment_dto_params packs the embedded panel timing:
	 * bit 63 = clamshell closed flag, bits 48..62 = v_total,
	 * bits 32..46 = h_total, bits 0..31 = pixel clock in 100Hz units. */
	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		clamshell_closed =
			(dc->config.vblank_alignment_dto_params >> 63);
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read back HW timing and pixel clock per pipe,
		 * then derive DTO phase/modulo relative to the embedded
		 * panel's pixel clock. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is always the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: apply the DTO override to every synchronizable
		 * non-embedded pipe and refresh its reported pixel clock. */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
					grouped_pipes[i]->stream->timing.pix_clk_100hz =
						pclk*get_clock_divider(grouped_pipes[i], false);
				/* Fall back to the first overridden pipe as
				 * master when no embedded panel exists. */
				if (master == -1)
					master = i;
			}
		}

	}
	return master;
}
2139 
2140 void dcn10_enable_vblanks_synchronization(
2141 	struct dc *dc,
2142 	int group_index,
2143 	int group_size,
2144 	struct pipe_ctx *grouped_pipes[])
2145 {
2146 	struct dc_context *dc_ctx = dc->ctx;
2147 	struct output_pixel_processor *opp;
2148 	struct timing_generator *tg;
2149 	int i, width, height, master;
2150 
2151 	for (i = 1; i < group_size; i++) {
2152 		opp = grouped_pipes[i]->stream_res.opp;
2153 		tg = grouped_pipes[i]->stream_res.tg;
2154 		tg->funcs->get_otg_active_size(tg, &width, &height);
2155 		if (opp->funcs->opp_program_dpg_dimensions)
2156 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2157 	}
2158 
2159 	for (i = 0; i < group_size; i++) {
2160 		if (grouped_pipes[i]->stream == NULL)
2161 			continue;
2162 		grouped_pipes[i]->stream->vblank_synchronized = false;
2163 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2164 	}
2165 
2166 	DC_SYNC_INFO("Aligning DP DTOs\n");
2167 
2168 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2169 
2170 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2171 
2172 	if (master >= 0) {
2173 		for (i = 0; i < group_size; i++) {
2174 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2175 			grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2176 				grouped_pipes[master]->stream_res.tg,
2177 				grouped_pipes[i]->stream_res.tg,
2178 				grouped_pipes[master]->stream->timing.pix_clk_100hz,
2179 				grouped_pipes[i]->stream->timing.pix_clk_100hz,
2180 				get_clock_divider(grouped_pipes[master], false),
2181 				get_clock_divider(grouped_pipes[i], false));
2182 				grouped_pipes[i]->stream->vblank_synchronized = true;
2183 		}
2184 		grouped_pipes[master]->stream->vblank_synchronized = true;
2185 		DC_SYNC_INFO("Sync complete\n");
2186 	}
2187 
2188 	for (i = 1; i < group_size; i++) {
2189 		opp = grouped_pipes[i]->stream_res.opp;
2190 		tg = grouped_pipes[i]->stream_res.tg;
2191 		tg->funcs->get_otg_active_size(tg, &width, &height);
2192 		if (opp->funcs->opp_program_dpg_dimensions)
2193 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2194 	}
2195 }
2196 
/*
 * dcn10_enable_timing_synchronization - Synchronize the timing generators of
 * a group of pipes by arming a reset trigger on each slave OTG against the
 * first pipe's OTG, waiting for it to fire, then disarming.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Enlarge the DPG dimensions while the trigger is armed; the real
	 * dimensions are restored at the end of the function. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave OTG to reset against pipe 0's OTG. */
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	/* NOTE(review): grouped_pipes[1] is accessed unconditionally — this
	 * assumes group_size >= 2; confirm callers guarantee that. */
	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);

	/* Restore the real DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2250 
2251 void dcn10_enable_per_frame_crtc_position_reset(
2252 	struct dc *dc,
2253 	int group_size,
2254 	struct pipe_ctx *grouped_pipes[])
2255 {
2256 	struct dc_context *dc_ctx = dc->ctx;
2257 	int i;
2258 
2259 	DC_SYNC_INFO("Setting up\n");
2260 	for (i = 0; i < group_size; i++)
2261 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2262 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2263 					grouped_pipes[i]->stream_res.tg,
2264 					0,
2265 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2266 
2267 	DC_SYNC_INFO("Waiting for trigger\n");
2268 
2269 	for (i = 0; i < group_size; i++)
2270 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2271 
2272 	DC_SYNC_INFO("Multi-display sync is complete\n");
2273 }
2274 
/* Read the MMHUB system-aperture registers and convert them into the
 * vm_system_aperture_param layout that HUBP expects. */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	/* Default physical page number, split across MSB/LSB registers. */
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (<< 12 implies 4 KiB pages). */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* NOTE(review): the low/high aperture bounds are shifted by 18 bits
	 * (256 KiB granularity implied) — confirm against the register spec. */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2298 
/* Temporary read settings, future will get values from kmd directly */
/* Reads the VM context0 page-table registers (base/start/end/fault-default)
 * and rebases the PTE base from UMA space into the frame-buffer aperture. */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	/* Page table base address, split across HI/LO registers. */
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	/* Fallback physical address used on protection faults. */
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset register values are in 16 MiB units (<< 24). */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2343 
2344 
2345 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2346 {
2347 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2348 	struct vm_system_aperture_param apt = {0};
2349 	struct vm_context0_param vm0 = {0};
2350 
2351 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2352 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2353 
2354 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2355 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2356 }
2357 
2358 static void dcn10_enable_plane(
2359 	struct dc *dc,
2360 	struct pipe_ctx *pipe_ctx,
2361 	struct dc_state *context)
2362 {
2363 	struct dce_hwseq *hws = dc->hwseq;
2364 
2365 	if (dc->debug.sanity_checks) {
2366 		hws->funcs.verify_allow_pstate_change_high(dc);
2367 	}
2368 
2369 	undo_DEGVIDCN10_253_wa(dc);
2370 
2371 	power_on_plane(dc->hwseq,
2372 		pipe_ctx->plane_res.hubp->inst);
2373 
2374 	/* enable DCFCLK current DCHUB */
2375 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2376 
2377 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2378 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2379 			pipe_ctx->stream_res.opp,
2380 			true);
2381 
2382 	if (dc->config.gpu_vm_support)
2383 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2384 
2385 	if (dc->debug.sanity_checks) {
2386 		hws->funcs.verify_allow_pstate_change_high(dc);
2387 	}
2388 
2389 	if (!pipe_ctx->top_pipe
2390 		&& pipe_ctx->plane_state
2391 		&& pipe_ctx->plane_state->flip_int_enabled
2392 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2393 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2394 
2395 }
2396 
2397 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2398 {
2399 	int i = 0;
2400 	struct dpp_grph_csc_adjustment adjust;
2401 	memset(&adjust, 0, sizeof(adjust));
2402 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2403 
2404 
2405 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2406 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2407 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2408 			adjust.temperature_matrix[i] =
2409 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2410 	} else if (pipe_ctx->plane_state &&
2411 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2412 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2413 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2414 			adjust.temperature_matrix[i] =
2415 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2416 	}
2417 
2418 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2419 }
2420 
2421 
2422 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2423 {
2424 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2425 		if (pipe_ctx->top_pipe) {
2426 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2427 
2428 			while (top->top_pipe)
2429 				top = top->top_pipe; // Traverse to top pipe_ctx
2430 			if (top->plane_state && top->plane_state->layer_index == 0)
2431 				return true; // Front MPO plane not hidden
2432 		}
2433 	}
2434 	return false;
2435 }
2436 
2437 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2438 {
2439 	// Override rear plane RGB bias to fix MPO brightness
2440 	uint16_t rgb_bias = matrix[3];
2441 
2442 	matrix[3] = 0;
2443 	matrix[7] = 0;
2444 	matrix[11] = 0;
2445 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2446 	matrix[3] = rgb_bias;
2447 	matrix[7] = rgb_bias;
2448 	matrix[11] = rgb_bias;
2449 }
2450 
2451 void dcn10_program_output_csc(struct dc *dc,
2452 		struct pipe_ctx *pipe_ctx,
2453 		enum dc_color_space colorspace,
2454 		uint16_t *matrix,
2455 		int opp_id)
2456 {
2457 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2458 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2459 
2460 			/* MPO is broken with RGB colorspaces when OCSC matrix
2461 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2462 			 * Blending adds offsets from front + rear to rear plane
2463 			 *
2464 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2465 			 * black value pixels add offset instead of rear + front
2466 			 */
2467 
2468 			int16_t rgb_bias = matrix[3];
2469 			// matrix[3/7/11] are all the same offset value
2470 
2471 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2472 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2473 			} else {
2474 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2475 			}
2476 		}
2477 	} else {
2478 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2479 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2480 	}
2481 }
2482 
2483 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2484 {
2485 	struct dc_bias_and_scale bns_params = {0};
2486 
2487 	// program the input csc
2488 	dpp->funcs->dpp_setup(dpp,
2489 			plane_state->format,
2490 			EXPANSION_MODE_ZERO,
2491 			plane_state->input_csc_color_matrix,
2492 			plane_state->color_space,
2493 			NULL);
2494 
2495 	//set scale and bias registers
2496 	build_prescale_params(&bns_params, plane_state);
2497 	if (dpp->funcs->dpp_program_bias_and_scale)
2498 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2499 }
2500 
2501 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2502 {
2503 	struct mpc *mpc = dc->res_pool->mpc;
2504 
2505 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2506 		get_hdr_visual_confirm_color(pipe_ctx, color);
2507 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2508 		get_surface_visual_confirm_color(pipe_ctx, color);
2509 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2510 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2511 	else
2512 		color_space_to_black_color(
2513 				dc, pipe_ctx->stream->output_color_space, color);
2514 
2515 	if (mpc->funcs->set_bg_color)
2516 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2517 }
2518 
/*
 * dcn10_update_mpcc - Build the MPCC blending config for a plane and either
 * update the existing MPCC in place (partial update) or re-insert the plane
 * into the MPC tree (full update).
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only applies when this plane blends over another. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	/* Fully opaque (0xff) unless the plane specifies a global alpha. */
	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;


	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);

	/* Record the OPP/MPCC routing on the HUBP for later lookups. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2592 
2593 static void update_scaler(struct pipe_ctx *pipe_ctx)
2594 {
2595 	bool per_pixel_alpha =
2596 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2597 
2598 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2599 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2600 	/* scaler configuration */
2601 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2602 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2603 }
2604 
2605 static void dcn10_update_dchubp_dpp(
2606 	struct dc *dc,
2607 	struct pipe_ctx *pipe_ctx,
2608 	struct dc_state *context)
2609 {
2610 	struct dce_hwseq *hws = dc->hwseq;
2611 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2612 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2613 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2614 	struct plane_size size = plane_state->plane_size;
2615 	unsigned int compat_level = 0;
2616 	bool should_divided_by_2 = false;
2617 
2618 	/* depends on DML calculation, DPP clock value may change dynamically */
2619 	/* If request max dpp clk is lower than current dispclk, no need to
2620 	 * divided by 2
2621 	 */
2622 	if (plane_state->update_flags.bits.full_update) {
2623 
2624 		/* new calculated dispclk, dppclk are stored in
2625 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
2626 		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
2627 		 * dcn_validate_bandwidth compute new dispclk, dppclk.
2628 		 * dispclk will put in use after optimize_bandwidth when
2629 		 * ramp_up_dispclk_with_dpp is called.
2630 		 * there are two places for dppclk be put in use. One location
2631 		 * is the same as the location as dispclk. Another is within
2632 		 * update_dchubp_dpp which happens between pre_bandwidth and
2633 		 * optimize_bandwidth.
2634 		 * dppclk updated within update_dchubp_dpp will cause new
2635 		 * clock values of dispclk and dppclk not be in use at the same
2636 		 * time. when clocks are decreased, this may cause dppclk is
2637 		 * lower than previous configuration and let pipe stuck.
2638 		 * for example, eDP + external dp,  change resolution of DP from
2639 		 * 1920x1080x144hz to 1280x960x60hz.
2640 		 * before change: dispclk = 337889 dppclk = 337889
2641 		 * change mode, dcn_validate_bandwidth calculate
2642 		 *                dispclk = 143122 dppclk = 143122
2643 		 * update_dchubp_dpp be executed before dispclk be updated,
2644 		 * dispclk = 337889, but dppclk use new value dispclk /2 =
2645 		 * 168944. this will cause pipe pstate warning issue.
2646 		 * solution: between pre_bandwidth and optimize_bandwidth, while
2647 		 * dispclk is going to be decreased, keep dppclk = dispclk
2648 		 **/
2649 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2650 				dc->clk_mgr->clks.dispclk_khz)
2651 			should_divided_by_2 = false;
2652 		else
2653 			should_divided_by_2 =
2654 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2655 					dc->clk_mgr->clks.dispclk_khz / 2;
2656 
2657 		dpp->funcs->dpp_dppclk_control(
2658 				dpp,
2659 				should_divided_by_2,
2660 				true);
2661 
2662 		if (dc->res_pool->dccg)
2663 			dc->res_pool->dccg->funcs->update_dpp_dto(
2664 					dc->res_pool->dccg,
2665 					dpp->inst,
2666 					pipe_ctx->plane_res.bw.dppclk_khz);
2667 		else
2668 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2669 						dc->clk_mgr->clks.dispclk_khz / 2 :
2670 							dc->clk_mgr->clks.dispclk_khz;
2671 	}
2672 
2673 	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
2674 	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
2675 	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
2676 	 */
2677 	if (plane_state->update_flags.bits.full_update) {
2678 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2679 
2680 		hubp->funcs->hubp_setup(
2681 			hubp,
2682 			&pipe_ctx->dlg_regs,
2683 			&pipe_ctx->ttu_regs,
2684 			&pipe_ctx->rq_regs,
2685 			&pipe_ctx->pipe_dlg_param);
2686 		hubp->funcs->hubp_setup_interdependent(
2687 			hubp,
2688 			&pipe_ctx->dlg_regs,
2689 			&pipe_ctx->ttu_regs);
2690 	}
2691 
2692 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2693 
2694 	if (plane_state->update_flags.bits.full_update ||
2695 		plane_state->update_flags.bits.bpp_change)
2696 		dcn10_update_dpp(dpp, plane_state);
2697 
2698 	if (plane_state->update_flags.bits.full_update ||
2699 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2700 		plane_state->update_flags.bits.global_alpha_change)
2701 		hws->funcs.update_mpcc(dc, pipe_ctx);
2702 
2703 	if (plane_state->update_flags.bits.full_update ||
2704 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2705 		plane_state->update_flags.bits.global_alpha_change ||
2706 		plane_state->update_flags.bits.scaling_change ||
2707 		plane_state->update_flags.bits.position_change) {
2708 		update_scaler(pipe_ctx);
2709 	}
2710 
2711 	if (plane_state->update_flags.bits.full_update ||
2712 		plane_state->update_flags.bits.scaling_change ||
2713 		plane_state->update_flags.bits.position_change) {
2714 		hubp->funcs->mem_program_viewport(
2715 			hubp,
2716 			&pipe_ctx->plane_res.scl_data.viewport,
2717 			&pipe_ctx->plane_res.scl_data.viewport_c);
2718 	}
2719 
2720 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2721 		dc->hwss.set_cursor_position(pipe_ctx);
2722 		dc->hwss.set_cursor_attribute(pipe_ctx);
2723 
2724 		if (dc->hwss.set_cursor_sdr_white_level)
2725 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2726 	}
2727 
2728 	if (plane_state->update_flags.bits.full_update) {
2729 		/*gamut remap*/
2730 		dc->hwss.program_gamut_remap(pipe_ctx);
2731 
2732 		dc->hwss.program_output_csc(dc,
2733 				pipe_ctx,
2734 				pipe_ctx->stream->output_color_space,
2735 				pipe_ctx->stream->csc_color_matrix.matrix,
2736 				pipe_ctx->stream_res.opp->inst);
2737 	}
2738 
2739 	if (plane_state->update_flags.bits.full_update ||
2740 		plane_state->update_flags.bits.pixel_format_change ||
2741 		plane_state->update_flags.bits.horizontal_mirror_change ||
2742 		plane_state->update_flags.bits.rotation_change ||
2743 		plane_state->update_flags.bits.swizzle_change ||
2744 		plane_state->update_flags.bits.dcc_change ||
2745 		plane_state->update_flags.bits.bpp_change ||
2746 		plane_state->update_flags.bits.scaling_change ||
2747 		plane_state->update_flags.bits.plane_size_change) {
2748 		hubp->funcs->hubp_program_surface_config(
2749 			hubp,
2750 			plane_state->format,
2751 			&plane_state->tiling_info,
2752 			&size,
2753 			plane_state->rotation,
2754 			&plane_state->dcc,
2755 			plane_state->horizontal_mirror,
2756 			compat_level);
2757 	}
2758 
2759 	hubp->power_gated = false;
2760 
2761 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2762 
2763 	if (is_pipe_tree_visible(pipe_ctx))
2764 		hubp->funcs->set_blank(hubp, false);
2765 }
2766 
2767 void dcn10_blank_pixel_data(
2768 		struct dc *dc,
2769 		struct pipe_ctx *pipe_ctx,
2770 		bool blank)
2771 {
2772 	enum dc_color_space color_space;
2773 	struct tg_color black_color = {0};
2774 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2775 	struct dc_stream_state *stream = pipe_ctx->stream;
2776 
2777 	/* program otg blank color */
2778 	color_space = stream->output_color_space;
2779 	color_space_to_black_color(dc, color_space, &black_color);
2780 
2781 	/*
2782 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
2783 	 * alternate between Cb and Cr, so both channels need the pixel
2784 	 * value for Y
2785 	 */
2786 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2787 		black_color.color_r_cr = black_color.color_g_y;
2788 
2789 
2790 	if (stream_res->tg->funcs->set_blank_color)
2791 		stream_res->tg->funcs->set_blank_color(
2792 				stream_res->tg,
2793 				&black_color);
2794 
2795 	if (!blank) {
2796 		if (stream_res->tg->funcs->set_blank)
2797 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2798 		if (stream_res->abm) {
2799 			dc->hwss.set_pipe(pipe_ctx);
2800 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2801 		}
2802 	} else if (blank) {
2803 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2804 		if (stream_res->tg->funcs->set_blank) {
2805 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2806 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2807 		}
2808 	}
2809 }
2810 
2811 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2812 {
2813 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2814 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2815 	struct custom_float_format fmt;
2816 
2817 	fmt.exponenta_bits = 6;
2818 	fmt.mantissa_bits = 12;
2819 	fmt.sign = true;
2820 
2821 
2822 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2823 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2824 
2825 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2826 			pipe_ctx->plane_res.dpp, hw_mult);
2827 }
2828 
/*
 * Program one pipe's front end for the given context: OTG-level global
 * sync, VTG and blanking (top pipe only), then HUBP/DPP programming, HDR
 * multiplier and input/output transfer functions.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the top pipe of the tree owns OTG-level programming. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	/* NOTE(review): plane_state is dereferenced unconditionally below;
	 * callers are expected to only pass pipes with an attached plane. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* Degamma/input LUT only needs reprogramming when gamma or the
	 * transfer function actually changed (or on a full update). */
	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2876 
2877 void dcn10_wait_for_pending_cleared(struct dc *dc,
2878 		struct dc_state *context)
2879 {
2880 		struct pipe_ctx *pipe_ctx;
2881 		struct timing_generator *tg;
2882 		int i;
2883 
2884 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2885 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2886 			tg = pipe_ctx->stream_res.tg;
2887 
2888 			/*
2889 			 * Only wait for top pipe's tg penindg bit
2890 			 * Also skip if pipe is disabled.
2891 			 */
2892 			if (pipe_ctx->top_pipe ||
2893 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2894 			    !tg->funcs->is_tg_enabled(tg))
2895 				continue;
2896 
2897 			/*
2898 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2899 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2900 			 * seems to not trigger the update right away, and if we
2901 			 * lock again before VUPDATE then we don't get a separated
2902 			 * operation.
2903 			 */
2904 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2905 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2906 		}
2907 }
2908 
2909 void dcn10_post_unlock_program_front_end(
2910 		struct dc *dc,
2911 		struct dc_state *context)
2912 {
2913 	int i;
2914 
2915 	DC_LOGGER_INIT(dc->ctx->logger);
2916 
2917 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2918 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2919 
2920 		if (!pipe_ctx->top_pipe &&
2921 			!pipe_ctx->prev_odm_pipe &&
2922 			pipe_ctx->stream) {
2923 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
2924 
2925 			if (context->stream_status[i].plane_count == 0)
2926 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2927 		}
2928 	}
2929 
2930 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2931 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2932 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2933 
2934 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2935 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2936 			dc->hwss.optimize_bandwidth(dc, context);
2937 			break;
2938 		}
2939 
2940 	if (dc->hwseq->wa.DEGVIDCN10_254)
2941 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2942 }
2943 
2944 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2945 {
2946 	uint8_t i;
2947 
2948 	for (i = 0; i < context->stream_count; i++) {
2949 		if (context->streams[i]->timing.timing_3d_format
2950 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2951 			/*
2952 			 * Disable stutter
2953 			 */
2954 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2955 			break;
2956 		}
2957 	}
2958 }
2959 
/*
 * Raise clocks and program watermarks ahead of front-end programming.
 * The matching dcn10_optimize_bandwidth() runs afterwards to trim
 * clocks back down once the new configuration is committed.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* With no streams, the PHY clock requirement drops to zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* Third argument is false here but true in
		 * dcn10_optimize_bandwidth — presumably "safe to lower";
		 * confirm against the clk_mgr update_clocks contract. */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	/* program_watermarks() reports whether a later optimize pass is needed. */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
2992 
/*
 * Counterpart to dcn10_prepare_bandwidth(): after the new configuration
 * has taken effect, update clocks again (now allowed to decrease) and
 * reprogram watermarks.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* With no streams, the PHY clock requirement drops to zero. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* Third argument is true here but false in
		 * dcn10_prepare_bandwidth — presumably "safe to lower";
		 * confirm against the clk_mgr update_clocks contract. */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3026 
3027 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3028 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3029 {
3030 	int i = 0;
3031 	struct drr_params params = {0};
3032 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3033 	unsigned int event_triggers = 0x800;
3034 	// Note DRR trigger events are generated regardless of whether num frames met.
3035 	unsigned int num_frames = 2;
3036 
3037 	params.vertical_total_max = adjust.v_total_max;
3038 	params.vertical_total_min = adjust.v_total_min;
3039 	params.vertical_total_mid = adjust.v_total_mid;
3040 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3041 	/* TODO: If multiple pipes are to be supported, you need
3042 	 * some GSL stuff. Static screen triggers may be programmed differently
3043 	 * as well.
3044 	 */
3045 	for (i = 0; i < num_pipes; i++) {
3046 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3047 			pipe_ctx[i]->stream_res.tg, &params);
3048 		if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3049 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3050 					pipe_ctx[i]->stream_res.tg,
3051 					event_triggers, num_frames);
3052 	}
3053 }
3054 
3055 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3056 		int num_pipes,
3057 		struct crtc_position *position)
3058 {
3059 	int i = 0;
3060 
3061 	/* TODO: handle pipes > 1
3062 	 */
3063 	for (i = 0; i < num_pipes; i++)
3064 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3065 }
3066 
3067 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3068 		int num_pipes, const struct dc_static_screen_params *params)
3069 {
3070 	unsigned int i;
3071 	unsigned int triggers = 0;
3072 
3073 	if (params->triggers.surface_update)
3074 		triggers |= 0x80;
3075 	if (params->triggers.cursor_update)
3076 		triggers |= 0x2;
3077 	if (params->triggers.force_trigger)
3078 		triggers |= 0x1;
3079 
3080 	for (i = 0; i < num_pipes; i++)
3081 		pipe_ctx[i]->stream_res.tg->funcs->
3082 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3083 					triggers, params->num_frames);
3084 }
3085 
3086 static void dcn10_config_stereo_parameters(
3087 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3088 {
3089 	enum view_3d_format view_format = stream->view_format;
3090 	enum dc_timing_3d_format timing_3d_format =\
3091 			stream->timing.timing_3d_format;
3092 	bool non_stereo_timing = false;
3093 
3094 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3095 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3096 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3097 		non_stereo_timing = true;
3098 
3099 	if (non_stereo_timing == false &&
3100 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3101 
3102 		flags->PROGRAM_STEREO         = 1;
3103 		flags->PROGRAM_POLARITY       = 1;
3104 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3105 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3106 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3107 			enum display_dongle_type dongle = \
3108 					stream->link->ddc->dongle_type;
3109 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3110 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3111 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3112 				flags->DISABLE_STEREO_DP_SYNC = 1;
3113 		}
3114 		flags->RIGHT_EYE_POLARITY =\
3115 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3116 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3117 			flags->FRAME_PACKED = 1;
3118 	}
3119 
3120 	return;
3121 }
3122 
3123 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3124 {
3125 	struct crtc_stereo_flags flags = { 0 };
3126 	struct dc_stream_state *stream = pipe_ctx->stream;
3127 
3128 	dcn10_config_stereo_parameters(stream, &flags);
3129 
3130 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3131 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3132 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3133 	} else {
3134 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3135 	}
3136 
3137 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3138 		pipe_ctx->stream_res.opp,
3139 		flags.PROGRAM_STEREO == 1,
3140 		&stream->timing);
3141 
3142 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3143 		pipe_ctx->stream_res.tg,
3144 		&stream->timing,
3145 		&flags);
3146 
3147 	return;
3148 }
3149 
3150 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3151 {
3152 	int i;
3153 
3154 	for (i = 0; i < res_pool->pipe_count; i++) {
3155 		if (res_pool->hubps[i]->inst == mpcc_inst)
3156 			return res_pool->hubps[i];
3157 	}
3158 	ASSERT(false);
3159 	return NULL;
3160 }
3161 
3162 void dcn10_wait_for_mpcc_disconnect(
3163 		struct dc *dc,
3164 		struct resource_pool *res_pool,
3165 		struct pipe_ctx *pipe_ctx)
3166 {
3167 	struct dce_hwseq *hws = dc->hwseq;
3168 	int mpcc_inst;
3169 
3170 	if (dc->debug.sanity_checks) {
3171 		hws->funcs.verify_allow_pstate_change_high(dc);
3172 	}
3173 
3174 	if (!pipe_ctx->stream_res.opp)
3175 		return;
3176 
3177 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3178 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3179 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3180 
3181 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3182 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3183 			hubp->funcs->set_blank(hubp, true);
3184 		}
3185 	}
3186 
3187 	if (dc->debug.sanity_checks) {
3188 		hws->funcs.verify_allow_pstate_change_high(dc);
3189 	}
3190 
3191 }
3192 
3193 bool dcn10_dummy_display_power_gating(
3194 	struct dc *dc,
3195 	uint8_t controller_id,
3196 	struct dc_bios *dcb,
3197 	enum pipe_gating_control power_gating)
3198 {
3199 	return true;
3200 }
3201 
3202 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3203 {
3204 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3205 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3206 	bool flip_pending;
3207 	struct dc *dc = plane_state->ctx->dc;
3208 
3209 	if (plane_state == NULL)
3210 		return;
3211 
3212 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3213 					pipe_ctx->plane_res.hubp);
3214 
3215 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3216 
3217 	if (!flip_pending)
3218 		plane_state->status.current_address = plane_state->status.requested_address;
3219 
3220 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3221 			tg->funcs->is_stereo_left_eye) {
3222 		plane_state->status.is_right_eye =
3223 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3224 	}
3225 
3226 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3227 		struct dce_hwseq *hwseq = dc->hwseq;
3228 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3229 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3230 
3231 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3232 			struct hubbub *hubbub = dc->res_pool->hubbub;
3233 
3234 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3235 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3236 		}
3237 	}
3238 }
3239 
3240 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3241 {
3242 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3243 
3244 	/* In DCN, this programming sequence is owned by the hubbub */
3245 	hubbub->funcs->update_dchub(hubbub, dh_data);
3246 }
3247 
/*
 * Returns true when some pipe above this one has a visible plane on a
 * different layer whose (pipe-split-merged) recout fully contains this
 * pipe's recout — in which case this pipe's HW cursor can be disabled.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe, *split_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	struct rect r1 = scl_data->recout, r2, r2_half;
	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;

	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
			continue;

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;
		split_pipe = test_pipe;

		/**
		 * There is another half plane on same layer because of
		 * pipe-split, merge together per same height.
		 *
		 * NOTE(review): this scan starts from pipe_ctx->top_pipe, so
		 * the first layer-index match can be test_pipe itself, which
		 * would merge test_pipe's recout with its own copy (doubling
		 * r2.width). Confirm whether the scan was meant to exclude
		 * test_pipe.
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
		     split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;
				break;
			}

		// Containment test: r1 entirely inside the merged r2.
		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}
3292 
/*
 * Translate the stream-space cursor position into plane space (accounting
 * for plane scaling, source-rect clipping, rotation and pipe-split/ODM
 * layouts) and program it into both HUBP and DPP.
 *
 * NOTE(review): pos_cpy.x/y are uint32_t while the viewport fields used in
 * the rotation math below are int; several comparisons and subtractions mix
 * the two (e.g. the 90/270/180-degree branches). The arithmetic relies on
 * intermediate values staying in range — confirm for small viewports.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: src width/height are swapped relative to dst. */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* HW cursor is not drawn for video-progressive surfaces. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* A fully-covering pipe above makes this pipe's cursor redundant. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* Use the smaller of the two pipes' viewport.y as the
			 * normalization offset (same result on both pipes). */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Program the final plane-space position into both HW blocks. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3494 
3495 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3496 {
3497 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3498 
3499 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3500 			pipe_ctx->plane_res.hubp, attributes);
3501 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3502 		pipe_ctx->plane_res.dpp, attributes);
3503 }
3504 
3505 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3506 {
3507 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3508 	struct fixed31_32 multiplier;
3509 	struct dpp_cursor_attributes opt_attr = { 0 };
3510 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3511 	struct custom_float_format fmt;
3512 
3513 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3514 		return;
3515 
3516 	fmt.exponenta_bits = 5;
3517 	fmt.mantissa_bits = 10;
3518 	fmt.sign = true;
3519 
3520 	if (sdr_white_level > 80) {
3521 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3522 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3523 	}
3524 
3525 	opt_attr.scale = hw_scale;
3526 	opt_attr.bias = 0;
3527 
3528 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3529 			pipe_ctx->plane_res.dpp, &opt_attr);
3530 }
3531 
3532 /*
3533  * apply_front_porch_workaround  TODO FPGA still need?
3534  *
3535  * This is a workaround for a bug that has existed since R5xx and has not been
3536  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3537  */
3538 static void apply_front_porch_workaround(
3539 	struct dc_crtc_timing *timing)
3540 {
3541 	if (timing->flags.INTERLACE == 1) {
3542 		if (timing->v_front_porch < 2)
3543 			timing->v_front_porch = 2;
3544 	} else {
3545 		if (timing->v_front_porch < 1)
3546 			timing->v_front_porch = 1;
3547 	}
3548 }
3549 
3550 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3551 {
3552 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3553 	struct dc_crtc_timing patched_crtc_timing;
3554 	int vesa_sync_start;
3555 	int asic_blank_end;
3556 	int interlace_factor;
3557 	int vertical_line_start;
3558 
3559 	patched_crtc_timing = *dc_crtc_timing;
3560 	apply_front_porch_workaround(&patched_crtc_timing);
3561 
3562 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3563 
3564 	vesa_sync_start = patched_crtc_timing.v_addressable +
3565 			patched_crtc_timing.v_border_bottom +
3566 			patched_crtc_timing.v_front_porch;
3567 
3568 	asic_blank_end = (patched_crtc_timing.v_total -
3569 			vesa_sync_start -
3570 			patched_crtc_timing.v_border_top)
3571 			* interlace_factor;
3572 
3573 	vertical_line_start = asic_blank_end -
3574 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3575 
3576 	return vertical_line_start;
3577 }
3578 
3579 void dcn10_calc_vupdate_position(
3580 		struct dc *dc,
3581 		struct pipe_ctx *pipe_ctx,
3582 		uint32_t *start_line,
3583 		uint32_t *end_line)
3584 {
3585 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3586 	int vline_int_offset_from_vupdate =
3587 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3588 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3589 	int start_position;
3590 
3591 	if (vline_int_offset_from_vupdate > 0)
3592 		vline_int_offset_from_vupdate--;
3593 	else if (vline_int_offset_from_vupdate < 0)
3594 		vline_int_offset_from_vupdate++;
3595 
3596 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3597 
3598 	if (start_position >= 0)
3599 		*start_line = start_position;
3600 	else
3601 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3602 
3603 	*end_line = *start_line + 2;
3604 
3605 	if (*end_line >= dc_crtc_timing->v_total)
3606 		*end_line = 2;
3607 }
3608 
3609 static void dcn10_cal_vline_position(
3610 		struct dc *dc,
3611 		struct pipe_ctx *pipe_ctx,
3612 		enum vline_select vline,
3613 		uint32_t *start_line,
3614 		uint32_t *end_line)
3615 {
3616 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3617 
3618 	if (vline == VLINE0)
3619 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3620 	else if (vline == VLINE1)
3621 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3622 
3623 	switch (ref_point) {
3624 	case START_V_UPDATE:
3625 		dcn10_calc_vupdate_position(
3626 				dc,
3627 				pipe_ctx,
3628 				start_line,
3629 				end_line);
3630 		break;
3631 	case START_V_SYNC:
3632 		// Suppose to do nothing because vsync is 0;
3633 		break;
3634 	default:
3635 		ASSERT(0);
3636 		break;
3637 	}
3638 }
3639 
3640 void dcn10_setup_periodic_interrupt(
3641 		struct dc *dc,
3642 		struct pipe_ctx *pipe_ctx,
3643 		enum vline_select vline)
3644 {
3645 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3646 
3647 	if (vline == VLINE0) {
3648 		uint32_t start_line = 0;
3649 		uint32_t end_line = 0;
3650 
3651 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3652 
3653 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3654 
3655 	} else if (vline == VLINE1) {
3656 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3657 				tg,
3658 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3659 	}
3660 }
3661 
3662 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3663 {
3664 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3665 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3666 
3667 	if (start_line < 0) {
3668 		ASSERT(0);
3669 		start_line = 0;
3670 	}
3671 
3672 	if (tg->funcs->setup_vertical_interrupt2)
3673 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3674 }
3675 
3676 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3677 		struct dc_link_settings *link_settings)
3678 {
3679 	struct encoder_unblank_param params = {0};
3680 	struct dc_stream_state *stream = pipe_ctx->stream;
3681 	struct dc_link *link = stream->link;
3682 	struct dce_hwseq *hws = link->dc->hwseq;
3683 
3684 	/* only 3 items below are used by unblank */
3685 	params.timing = pipe_ctx->stream->timing;
3686 
3687 	params.link_settings.link_rate = link_settings->link_rate;
3688 
3689 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3690 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3691 			params.timing.pix_clk_100hz /= 2;
3692 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3693 	}
3694 
3695 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3696 		hws->funcs.edp_backlight_control(link, true);
3697 	}
3698 }
3699 
3700 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3701 				const uint8_t *custom_sdp_message,
3702 				unsigned int sdp_message_size)
3703 {
3704 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3705 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3706 				pipe_ctx->stream_res.stream_enc,
3707 				custom_sdp_message,
3708 				sdp_message_size);
3709 	}
3710 }
3711 enum dc_status dcn10_set_clock(struct dc *dc,
3712 			enum dc_clock_type clock_type,
3713 			uint32_t clk_khz,
3714 			uint32_t stepping)
3715 {
3716 	struct dc_state *context = dc->current_state;
3717 	struct dc_clock_config clock_cfg = {0};
3718 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3719 
3720 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3721 		return DC_FAIL_UNSUPPORTED_1;
3722 
3723 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3724 		context, clock_type, &clock_cfg);
3725 
3726 	if (clk_khz > clock_cfg.max_clock_khz)
3727 		return DC_FAIL_CLK_EXCEED_MAX;
3728 
3729 	if (clk_khz < clock_cfg.min_clock_khz)
3730 		return DC_FAIL_CLK_BELOW_MIN;
3731 
3732 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3733 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3734 
3735 	/*update internal request clock for update clock use*/
3736 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3737 		current_clocks->dispclk_khz = clk_khz;
3738 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3739 		current_clocks->dppclk_khz = clk_khz;
3740 	else
3741 		return DC_ERROR_UNEXPECTED;
3742 
3743 	if (dc->clk_mgr->funcs->update_clocks)
3744 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3745 				context, true);
3746 	return DC_OK;
3747 
3748 }
3749 
3750 void dcn10_get_clock(struct dc *dc,
3751 			enum dc_clock_type clock_type,
3752 			struct dc_clock_config *clock_cfg)
3753 {
3754 	struct dc_state *context = dc->current_state;
3755 
3756 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3757 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3758 
3759 }
3760 
3761 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3762 {
3763 	struct resource_pool *pool = dc->res_pool;
3764 	int i;
3765 
3766 	for (i = 0; i < pool->pipe_count; i++) {
3767 		struct hubp *hubp = pool->hubps[i];
3768 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3769 
3770 		hubp->funcs->hubp_read_state(hubp);
3771 
3772 		if (!s->blank_en)
3773 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3774 	}
3775 }
3776