1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 #include "dc_trace.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
59 
60 #define DC_LOGGER_INIT(logger)
61 
62 #define CTX \
63 	hws->ctx
64 #define REG(reg)\
65 	hws->regs->reg
66 
67 #undef FN
68 #define FN(reg_name, field_name) \
69 	hws->shifts->field_name, hws->masks->field_name
70 
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 	print_microsec(dc_ctx, log_ctx, ref_cycle)
74 
75 #define GAMMA_HW_POINTS_NUM 256
76 
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79 
80 void print_microsec(struct dc_context *dc_ctx,
81 	struct dc_log_buffer_ctx *log_ctx,
82 	uint32_t ref_cycle)
83 {
84 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 	static const unsigned int frac = 1000;
86 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87 
88 	DTN_INFO("  %11d.%03d",
89 			us_x10 / frac,
90 			us_x10 % frac);
91 }
92 
93 void dcn10_lock_all_pipes(struct dc *dc,
94 	struct dc_state *context,
95 	bool lock)
96 {
97 	struct pipe_ctx *pipe_ctx;
98 	struct timing_generator *tg;
99 	int i;
100 
101 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
102 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
103 		tg = pipe_ctx->stream_res.tg;
104 
105 		/*
106 		 * Only lock the top pipe's tg to prevent redundant
107 		 * (un)locking. Also skip if pipe is disabled.
108 		 */
109 		if (pipe_ctx->top_pipe ||
110 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
111 		    !tg->funcs->is_tg_enabled(tg))
112 			continue;
113 
114 		if (lock)
115 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
116 		else
117 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
118 	}
119 }
120 
/* Dump the MPC and DPP CRC result registers into the DTN log, when this
 * ASIC's register map provides them (a zero offset means "not present").
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
134 
135 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
136 {
137 	struct dc_context *dc_ctx = dc->ctx;
138 	struct dcn_hubbub_wm wm;
139 	int i;
140 
141 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
142 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
143 
144 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
145 			"         sr_enter          sr_exit  dram_clk_change\n");
146 
147 	for (i = 0; i < 4; i++) {
148 		struct dcn_hubbub_wm_set *s;
149 
150 		s = &wm.sets[i];
151 		DTN_INFO("WM_Set[%d]:", s->wm_set);
152 		DTN_INFO_MICRO_SEC(s->data_urgent);
153 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
154 		DTN_INFO_MICRO_SEC(s->sr_enter);
155 		DTN_INFO_MICRO_SEC(s->sr_exit);
156 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
157 		DTN_INFO("\n");
158 	}
159 
160 	DTN_INFO("\n");
161 }
162 
163 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
164 {
165 	struct dc_context *dc_ctx = dc->ctx;
166 	struct resource_pool *pool = dc->res_pool;
167 	int i;
168 
169 	DTN_INFO(
170 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
171 	for (i = 0; i < pool->pipe_count; i++) {
172 		struct hubp *hubp = pool->hubps[i];
173 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
174 
175 		hubp->funcs->hubp_read_state(hubp);
176 
177 		if (!s->blank_en) {
178 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
179 					hubp->inst,
180 					s->pixel_format,
181 					s->inuse_addr_hi,
182 					s->viewport_width,
183 					s->viewport_height,
184 					s->rotation_angle,
185 					s->h_mirror_en,
186 					s->sw_mode,
187 					s->dcc_en,
188 					s->blank_en,
189 					s->clock_en,
190 					s->ttu_disable,
191 					s->underflow_status);
192 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
193 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
194 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
195 			DTN_INFO("\n");
196 		}
197 	}
198 
199 	DTN_INFO("\n=========RQ========\n");
200 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
201 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
202 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
203 	for (i = 0; i < pool->pipe_count; i++) {
204 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
205 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
206 
207 		if (!s->blank_en)
208 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
209 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
210 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
211 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
212 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
213 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
214 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
215 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
216 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
217 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
218 	}
219 
220 	DTN_INFO("========DLG========\n");
221 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
222 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
223 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
224 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
225 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
226 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
227 			"  x_rp_dlay  x_rr_sfl\n");
228 	for (i = 0; i < pool->pipe_count; i++) {
229 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
230 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
231 
232 		if (!s->blank_en)
233 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
234 				"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
235 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
236 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
237 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
238 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
239 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
240 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
241 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
242 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
243 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
244 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
245 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
246 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
247 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
248 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
249 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
250 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
251 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
252 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
253 				dlg_regs->xfc_reg_remote_surface_flip_latency);
254 	}
255 
256 	DTN_INFO("========TTU========\n");
257 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
258 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
259 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
260 	for (i = 0; i < pool->pipe_count; i++) {
261 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
262 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
263 
264 		if (!s->blank_en)
265 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
266 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
267 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
268 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
269 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
270 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
271 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
272 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
273 	}
274 	DTN_INFO("\n");
275 }
276 
/*
 * dcn10_log_hw_state - Dump the full display HW state into the DTN log.
 *
 * @dc:      dc reference.
 * @log_ctx: destination log buffer (NULL semantics handled by DTN macros).
 *
 * Emits, in order: HUBBUB watermarks, per-HUBP state, DPP gamma/gamut
 * state, MPCC tree, OTG timing, DSC, stream-encoder and link-encoder
 * state, then the calculated clocks and (if present) MPC CRCs.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP: input/degamma/regamma LUT modes and gamut remap matrix. */
	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* MPCC blending tree; entries with opp_id 0xf are skipped
	 * (presumably unattached/idle MPCCs — matches the filter below).
	 */
	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* enc_read_state is optional; skip encoders without it. */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* read_state is optional; skip link encoders without it. */
		if (lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	DTN_INFO_END();
}
471 
472 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
473 {
474 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
475 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
476 
477 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
478 		tg->funcs->clear_optc_underflow(tg);
479 		return true;
480 	}
481 
482 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
483 		hubp->funcs->hubp_clear_underflow(hubp);
484 		return true;
485 	}
486 	return false;
487 }
488 
489 void dcn10_enable_power_gating_plane(
490 	struct dce_hwseq *hws,
491 	bool enable)
492 {
493 	bool force_on = true; /* disable power gating */
494 
495 	if (enable)
496 		force_on = false;
497 
498 	/* DCHUBP0/1/2/3 */
499 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
500 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
502 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
503 
504 	/* DPP0/1/2/3 */
505 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
506 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
507 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
508 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
509 }
510 
511 void dcn10_disable_vga(
512 	struct dce_hwseq *hws)
513 {
514 	unsigned int in_vga1_mode = 0;
515 	unsigned int in_vga2_mode = 0;
516 	unsigned int in_vga3_mode = 0;
517 	unsigned int in_vga4_mode = 0;
518 
519 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
520 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
521 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
522 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
523 
524 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
525 			in_vga3_mode == 0 && in_vga4_mode == 0)
526 		return;
527 
528 	REG_WRITE(D1VGA_CONTROL, 0);
529 	REG_WRITE(D2VGA_CONTROL, 0);
530 	REG_WRITE(D3VGA_CONTROL, 0);
531 	REG_WRITE(D4VGA_CONTROL, 0);
532 
533 	/* HW Engineer's Notes:
534 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
535 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
536 	 *
537 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
538 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
539 	 */
540 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
541 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
542 }
543 
544 /**
545  * dcn10_dpp_pg_control - DPP power gate control.
546  *
547  * @hws: dce_hwseq reference.
548  * @dpp_inst: DPP instance reference.
549  * @power_on: true if we want to enable power gate, false otherwise.
550  *
551  * Enable or disable power gate in the specific DPP instance.
552  */
553 void dcn10_dpp_pg_control(
554 		struct dce_hwseq *hws,
555 		unsigned int dpp_inst,
556 		bool power_on)
557 {
558 	uint32_t power_gate = power_on ? 0 : 1;
559 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
560 
561 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
562 		return;
563 	if (REG(DOMAIN1_PG_CONFIG) == 0)
564 		return;
565 
566 	switch (dpp_inst) {
567 	case 0: /* DPP0 */
568 		REG_UPDATE(DOMAIN1_PG_CONFIG,
569 				DOMAIN1_POWER_GATE, power_gate);
570 
571 		REG_WAIT(DOMAIN1_PG_STATUS,
572 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
573 				1, 1000);
574 		break;
575 	case 1: /* DPP1 */
576 		REG_UPDATE(DOMAIN3_PG_CONFIG,
577 				DOMAIN3_POWER_GATE, power_gate);
578 
579 		REG_WAIT(DOMAIN3_PG_STATUS,
580 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
581 				1, 1000);
582 		break;
583 	case 2: /* DPP2 */
584 		REG_UPDATE(DOMAIN5_PG_CONFIG,
585 				DOMAIN5_POWER_GATE, power_gate);
586 
587 		REG_WAIT(DOMAIN5_PG_STATUS,
588 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
589 				1, 1000);
590 		break;
591 	case 3: /* DPP3 */
592 		REG_UPDATE(DOMAIN7_PG_CONFIG,
593 				DOMAIN7_POWER_GATE, power_gate);
594 
595 		REG_WAIT(DOMAIN7_PG_STATUS,
596 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
597 				1, 1000);
598 		break;
599 	default:
600 		BREAK_TO_DEBUGGER();
601 		break;
602 	}
603 }
604 
605 /**
606  * dcn10_hubp_pg_control - HUBP power gate control.
607  *
608  * @hws: dce_hwseq reference.
609  * @hubp_inst: DPP instance reference.
610  * @power_on: true if we want to enable power gate, false otherwise.
611  *
612  * Enable or disable power gate in the specific HUBP instance.
613  */
614 void dcn10_hubp_pg_control(
615 		struct dce_hwseq *hws,
616 		unsigned int hubp_inst,
617 		bool power_on)
618 {
619 	uint32_t power_gate = power_on ? 0 : 1;
620 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
621 
622 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
623 		return;
624 	if (REG(DOMAIN0_PG_CONFIG) == 0)
625 		return;
626 
627 	switch (hubp_inst) {
628 	case 0: /* DCHUBP0 */
629 		REG_UPDATE(DOMAIN0_PG_CONFIG,
630 				DOMAIN0_POWER_GATE, power_gate);
631 
632 		REG_WAIT(DOMAIN0_PG_STATUS,
633 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
634 				1, 1000);
635 		break;
636 	case 1: /* DCHUBP1 */
637 		REG_UPDATE(DOMAIN2_PG_CONFIG,
638 				DOMAIN2_POWER_GATE, power_gate);
639 
640 		REG_WAIT(DOMAIN2_PG_STATUS,
641 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
642 				1, 1000);
643 		break;
644 	case 2: /* DCHUBP2 */
645 		REG_UPDATE(DOMAIN4_PG_CONFIG,
646 				DOMAIN4_POWER_GATE, power_gate);
647 
648 		REG_WAIT(DOMAIN4_PG_STATUS,
649 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
650 				1, 1000);
651 		break;
652 	case 3: /* DCHUBP3 */
653 		REG_UPDATE(DOMAIN6_PG_CONFIG,
654 				DOMAIN6_POWER_GATE, power_gate);
655 
656 		REG_WAIT(DOMAIN6_PG_STATUS,
657 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
658 				1, 1000);
659 		break;
660 	default:
661 		BREAK_TO_DEBUGGER();
662 		break;
663 	}
664 }
665 
666 static void power_on_plane(
667 	struct dce_hwseq *hws,
668 	int plane_id)
669 {
670 	DC_LOGGER_INIT(hws->ctx->logger);
671 	if (REG(DC_IP_REQUEST_CNTL)) {
672 		REG_SET(DC_IP_REQUEST_CNTL, 0,
673 				IP_REQUEST_EN, 1);
674 
675 		if (hws->funcs.dpp_pg_control)
676 			hws->funcs.dpp_pg_control(hws, plane_id, true);
677 
678 		if (hws->funcs.hubp_pg_control)
679 			hws->funcs.hubp_pg_control(hws, plane_id, true);
680 
681 		REG_SET(DC_IP_REQUEST_CNTL, 0,
682 				IP_REQUEST_EN, 0);
683 		DC_LOG_DEBUG(
684 				"Un-gated front end for pipe %d\n", plane_id);
685 	}
686 }
687 
/* Revert the DEGVIDCN10_253 workaround applied by
 * apply_DEGVIDCN10_253_wa(): blank HUBP0 again and power-gate it through
 * the IP request window.  No-op unless the workaround is marked applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Open the IP request window so the PG change takes effect. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
707 
/* DEGVIDCN10_253 workaround: once every HUBP is power-gated, power
 * HUBP0 back on (kept blanked, no output) so stutter can be enabled.
 * Skipped when stutter is disabled or the WA flag is not set; reversed
 * by undo_DEGVIDCN10_253_wa().
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Bail out as soon as any HUBP is still powered. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	/* Open the IP request window so the PG change takes effect. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
737 
738 void dcn10_bios_golden_init(struct dc *dc)
739 {
740 	struct dce_hwseq *hws = dc->hwseq;
741 	struct dc_bios *bp = dc->ctx->dc_bios;
742 	int i;
743 	bool allow_self_fresh_force_enable = true;
744 
745 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
746 		return;
747 
748 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
749 		allow_self_fresh_force_enable =
750 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
751 
752 
753 	/* WA for making DF sleep when idle after resume from S0i3.
754 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
755 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
756 	 * before calling command table and it changed to 1 after,
757 	 * it should be set back to 0.
758 	 */
759 
760 	/* initialize dcn global */
761 	bp->funcs->enable_disp_power_gating(bp,
762 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
763 
764 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
765 		/* initialize dcn per pipe */
766 		bp->funcs->enable_disp_power_gating(bp,
767 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
768 	}
769 
770 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
771 		if (allow_self_fresh_force_enable == false &&
772 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
773 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
774 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
775 
776 }
777 
778 static void false_optc_underflow_wa(
779 		struct dc *dc,
780 		const struct dc_stream_state *stream,
781 		struct timing_generator *tg)
782 {
783 	int i;
784 	bool underflow;
785 
786 	if (!dc->hwseq->wa.false_optc_underflow)
787 		return;
788 
789 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
790 
791 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
792 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
793 
794 		if (old_pipe_ctx->stream != stream)
795 			continue;
796 
797 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
798 	}
799 
800 	if (tg->funcs->set_blank_data_double_buffer)
801 		tg->funcs->set_blank_data_double_buffer(tg, true);
802 
803 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
804 		tg->funcs->clear_optc_underflow(tg);
805 }
806 
/*
 * dcn10_enable_stream_timing - Program the pixel clock and OTG timing
 * for a stream's head pipe, then start the CRTC.
 *
 * @pipe_ctx: pipe to program (children of a head pipe return early).
 * @context:  new state (unused here but part of the hwss signature).
 * @dc:       dc reference.
 *
 * Return: DC_OK on success, DC_ERROR_UNEXPECTED if the pixel clock
 * cannot be programmed or the CRTC fails to enable.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Program the OTG with the stream timing and DLG pipeline offsets. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG (waiting for completion) before enabling the CRTC,
	 * applying the false-underflow WA for any newly latched underflow.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
899 
/*
 * dcn10_reset_back_end_for_pipe - Tear down the display back end for one pipe.
 *
 * Disables the stream (or at least its audio), releases dynamically acquired
 * audio resources, and shuts down the timing generator when this is the
 * parent (top) pipe.  Finally detaches the stream from the pipe if the pipe
 * belongs to the current state.
 *
 * @dc:       dc instance
 * @pipe_ctx: pipe to reset (expected to come from dc->current_state)
 * @context:  new state being applied (unused here beyond the call contract)
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means the back end was never brought up for this
	 * pipe; just detach the stream pointer and bail.
	 */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any leftover DRR (variable refresh) configuration. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	/* Only clear the stream pointer when the pipe is actually part of the
	 * current state; otherwise leave it untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
969 
970 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
971 {
972 	struct hubp *hubp ;
973 	unsigned int i;
974 	bool need_recover = true;
975 
976 	if (!dc->debug.recovery_enabled)
977 		return false;
978 
979 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
980 		struct pipe_ctx *pipe_ctx =
981 			&dc->current_state->res_ctx.pipe_ctx[i];
982 		if (pipe_ctx != NULL) {
983 			hubp = pipe_ctx->plane_res.hubp;
984 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
985 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
986 					/* one pipe underflow, we will reset all the pipes*/
987 					need_recover = true;
988 				}
989 			}
990 		}
991 	}
992 	if (!need_recover)
993 		return false;
994 	/*
995 	DCHUBP_CNTL:HUBP_BLANK_EN=1
996 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
997 	DCHUBP_CNTL:HUBP_DISABLE=1
998 	DCHUBP_CNTL:HUBP_DISABLE=0
999 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1000 	DCSURF_PRIMARY_SURFACE_ADDRESS
1001 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1002 	*/
1003 
1004 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1005 		struct pipe_ctx *pipe_ctx =
1006 			&dc->current_state->res_ctx.pipe_ctx[i];
1007 		if (pipe_ctx != NULL) {
1008 			hubp = pipe_ctx->plane_res.hubp;
1009 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1010 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1011 				hubp->funcs->set_hubp_blank_en(hubp, true);
1012 		}
1013 	}
1014 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1015 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1016 
1017 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1018 		struct pipe_ctx *pipe_ctx =
1019 			&dc->current_state->res_ctx.pipe_ctx[i];
1020 		if (pipe_ctx != NULL) {
1021 			hubp = pipe_ctx->plane_res.hubp;
1022 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1023 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1024 				hubp->funcs->hubp_disable_control(hubp, true);
1025 		}
1026 	}
1027 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1028 		struct pipe_ctx *pipe_ctx =
1029 			&dc->current_state->res_ctx.pipe_ctx[i];
1030 		if (pipe_ctx != NULL) {
1031 			hubp = pipe_ctx->plane_res.hubp;
1032 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1033 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1034 				hubp->funcs->hubp_disable_control(hubp, true);
1035 		}
1036 	}
1037 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1038 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1039 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1040 		struct pipe_ctx *pipe_ctx =
1041 			&dc->current_state->res_ctx.pipe_ctx[i];
1042 		if (pipe_ctx != NULL) {
1043 			hubp = pipe_ctx->plane_res.hubp;
1044 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1045 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1046 				hubp->funcs->set_hubp_blank_en(hubp, true);
1047 		}
1048 	}
1049 	return true;
1050 
1051 }
1052 
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state change, attempting forced recovery if it does not.
 *
 * Used as a debug sanity hook after pipe programming; on failure it traces
 * pipe state, breaks into the debugger and runs the HUBP recovery workaround
 * once before re-checking.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): pipe_ctx is not declared in this scope; the
		 * TRACE_DC_PIPE_STATE macro presumably consumes the token
		 * without evaluating it — confirm against the macro definition.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
		/*check again*/
			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1072 
1073 /* trigger HW to start disconnect plane from stream on the next vsync */
1074 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1075 {
1076 	struct dce_hwseq *hws = dc->hwseq;
1077 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1078 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1079 	struct mpc *mpc = dc->res_pool->mpc;
1080 	struct mpc_tree *mpc_tree_params;
1081 	struct mpcc *mpcc_to_remove = NULL;
1082 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1083 
1084 	mpc_tree_params = &(opp->mpc_tree_params);
1085 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1086 
1087 	/*Already reset*/
1088 	if (mpcc_to_remove == NULL)
1089 		return;
1090 
1091 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1092 	if (opp != NULL)
1093 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1094 
1095 	dc->optimized_required = true;
1096 
1097 	if (hubp->funcs->hubp_disconnect)
1098 		hubp->funcs->hubp_disconnect(hubp);
1099 
1100 	if (dc->debug.sanity_checks)
1101 		hws->funcs.verify_allow_pstate_change_high(dc);
1102 }
1103 
1104 /**
1105  * dcn10_plane_atomic_power_down - Power down plane components.
1106  *
1107  * @dc: dc struct reference. used for grab hwseq.
1108  * @dpp: dpp struct reference.
1109  * @hubp: hubp struct reference.
1110  *
1111  * Keep in mind that this operation requires a power gate configuration;
1112  * however, requests for switch power gate are precisely controlled to avoid
1113  * problems. For this reason, power gate request is usually disabled. This
1114  * function first needs to enable the power gate request before disabling DPP
1115  * and HUBP. Finally, it disables the power gate request again.
1116  */
1117 void dcn10_plane_atomic_power_down(struct dc *dc,
1118 		struct dpp *dpp,
1119 		struct hubp *hubp)
1120 {
1121 	struct dce_hwseq *hws = dc->hwseq;
1122 	DC_LOGGER_INIT(dc->ctx->logger);
1123 
1124 	if (REG(DC_IP_REQUEST_CNTL)) {
1125 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1126 				IP_REQUEST_EN, 1);
1127 
1128 		if (hws->funcs.dpp_pg_control)
1129 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1130 
1131 		if (hws->funcs.hubp_pg_control)
1132 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1133 
1134 		dpp->funcs->dpp_reset(dpp);
1135 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1136 				IP_REQUEST_EN, 0);
1137 		DC_LOG_DEBUG(
1138 				"Power gated front end %d\n", hubp->inst);
1139 	}
1140 }
1141 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 *
 * Waits for the pending MPCC disconnect, gates the HUBP/DPP clocks (and the
 * OPP pipe clock when no MPCC remains on that OPP), power-gates the front
 * end, and finally clears all pipe_ctx bookkeeping for this pipe.
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* Must complete the vsync-triggered disconnect before clock gating. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only if the HUBP was attached to an OPP and
	 * that OPP's MPC tree is now empty.
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Detach every resource reference so the pipe reads as free. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1177 
1178 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1179 {
1180 	struct dce_hwseq *hws = dc->hwseq;
1181 	DC_LOGGER_INIT(dc->ctx->logger);
1182 
1183 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1184 		return;
1185 
1186 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1187 
1188 	apply_DEGVIDCN10_253_wa(dc);
1189 
1190 	DC_LOG_DC("Power down front end %d\n",
1191 					pipe_ctx->pipe_idx);
1192 }
1193 
/*
 * dcn10_init_pipes - Put all pipes into a known disabled state at init.
 *
 * Blanks and locks any enabled timing generators, resets the MPC muxes,
 * then maps each front end (HUBP/DPP/OPP) onto a pipe_ctx just long enough
 * to run the regular disconnect/disable path.  Pipes carrying a stream are
 * left untouched when seamless boot is in effect.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	/* Seamless boot applies if any stream requests the optimization. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily wire the i-th front-end resources into this
		 * pipe_ctx so the standard disable path can operate on them.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		/* Release the lock taken in the blanking pass above. */
		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		/* Undo the temporary wiring now that the pipe is disabled. */
		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);
	}
}
1301 
/*
 * dcn10_init_hw - One-time hardware initialization for DCN10.
 *
 * Brings up clocks, DCCG, BIOS golden settings and reference clocks, powers
 * the link encoders, gates unused DSCs, optionally powers down DP displays
 * lit by VBIOS, resets pipes, and initializes audio/ABM/DMCU and clock
 * gating.  The FPGA (Maximus) environment takes a short-cut path.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i, j;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	/* NOTE(review): always false here; presumably a hook for resume-time
	 * optimized init on other code paths — confirm before relying on it.
	 */
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		/* FPGA path: program timers/clock gating directly and return. */
		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal frequency
	 * when firmware info is available.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	/* Power gate DSCs */
	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
			if (hws->funcs.dsc_pg_control != NULL)
				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
	}

	/* Enable outbox notification feature of dmub */
	if (dc->debug.enable_dmub_aux_for_legacy_ddc)
		dmub_enable_outbox_notification(dc);

	/* we want to turn off all dp displays before doing detection */
	if (dc->config.power_down_display_on_boot) {
		uint8_t dpcd_power_state = '\0';
		enum dc_status status = DC_ERROR_UNEXPECTED;

		for (i = 0; i < dc->link_count; i++) {
			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
				continue;

			/* DP 2.0 requires that LTTPR Caps be read first */
			dp_retrieve_lttpr_cap(dc->links[i]);

			/*
			 * If any of the displays are lit up turn them off.
			 * The reason is that some MST hubs cannot be turned off
			 * completely until we tell them to do so.
			 * If not turned off, then displays connected to MST hub
			 * won't light up.
			 */
			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));
			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
				/* blank dp stream before power off receiver*/
				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);

					/* Find the stream encoder driving this DIG front end. */
					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
						if (fe == dc->res_pool->stream_enc[j]->id) {
							dc->res_pool->stream_enc[j]->funcs->dp_blank(
										dc->res_pool->stream_enc[j]);
							break;
						}
					}
				}
				dp_receiver_power_ctrl(dc->links[i], false);
			}
		}
	}

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* Panel control init reports the current backlight level;
			 * the last panel's value wins when there are several.
			 */
			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1496 
1497 /* In headless boot cases, DIG may be turned
1498  * on which causes HW/SW discrepancies.
1499  * To avoid this, power down hardware on boot
1500  * if DIG is turned on
1501  */
1502 void dcn10_power_down_on_boot(struct dc *dc)
1503 {
1504 	struct dc_link *edp_links[MAX_NUM_EDP];
1505 	struct dc_link *edp_link;
1506 	int edp_num;
1507 	int i = 0;
1508 
1509 	get_edp_links(dc, edp_links, &edp_num);
1510 
1511 	if (edp_num) {
1512 		for (i = 0; i < edp_num; i++) {
1513 			edp_link = edp_links[i];
1514 			if (edp_link->link_enc->funcs->is_dig_enabled &&
1515 					edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1516 					dc->hwseq->funcs.edp_backlight_control &&
1517 					dc->hwss.power_down &&
1518 					dc->hwss.edp_power_control) {
1519 				dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1520 				dc->hwss.power_down(dc);
1521 				dc->hwss.edp_power_control(edp_link, false);
1522 			}
1523 		}
1524 	} else {
1525 		for (i = 0; i < dc->link_count; i++) {
1526 			struct dc_link *link = dc->links[i];
1527 
1528 			if (link->link_enc->funcs->is_dig_enabled &&
1529 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1530 					dc->hwss.power_down) {
1531 				dc->hwss.power_down(dc);
1532 				break;
1533 			}
1534 
1535 		}
1536 	}
1537 
1538 	/*
1539 	 * Call update_clocks with empty context
1540 	 * to send DISPLAY_OFF
1541 	 * Otherwise DISPLAY_OFF may not be asserted
1542 	 */
1543 	if (dc->clk_mgr->funcs->set_low_power_state)
1544 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1545 }
1546 
/*
 * dcn10_reset_hw_ctx_wrap - Reset back ends whose pipes are removed or
 * changed by the new state.
 *
 * Walks the current state's pipes in reverse order (children before the
 * parent pipe 0) and resets the back end of every top pipe that either has
 * no stream in the new context or needs reprogramming.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only top pipes own the shared back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			/* Power down the now-unused clock source. */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1578 
1579 static bool patch_address_for_sbs_tb_stereo(
1580 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1581 {
1582 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1583 	bool sec_split = pipe_ctx->top_pipe &&
1584 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1585 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1586 		(pipe_ctx->stream->timing.timing_3d_format ==
1587 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1588 		 pipe_ctx->stream->timing.timing_3d_format ==
1589 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1590 		*addr = plane_state->address.grph_stereo.left_addr;
1591 		plane_state->address.grph_stereo.left_addr =
1592 		plane_state->address.grph_stereo.right_addr;
1593 		return true;
1594 	} else {
1595 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1596 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1597 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1598 			plane_state->address.grph_stereo.right_addr =
1599 			plane_state->address.grph_stereo.left_addr;
1600 			plane_state->address.grph_stereo.right_meta_addr =
1601 			plane_state->address.grph_stereo.left_meta_addr;
1602 		}
1603 	}
1604 	return false;
1605 }
1606 
1607 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1608 {
1609 	bool addr_patched = false;
1610 	PHYSICAL_ADDRESS_LOC addr;
1611 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1612 
1613 	if (plane_state == NULL)
1614 		return;
1615 
1616 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1617 
1618 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1619 			pipe_ctx->plane_res.hubp,
1620 			&plane_state->address,
1621 			plane_state->flip_immediate);
1622 
1623 	plane_state->status.requested_address = plane_state->address;
1624 
1625 	if (plane_state->flip_immediate)
1626 		plane_state->status.current_address = plane_state->address;
1627 
1628 	if (addr_patched)
1629 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1630 }
1631 
1632 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1633 			const struct dc_plane_state *plane_state)
1634 {
1635 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1636 	const struct dc_transfer_func *tf = NULL;
1637 	bool result = true;
1638 
1639 	if (dpp_base == NULL)
1640 		return false;
1641 
1642 	if (plane_state->in_transfer_func)
1643 		tf = plane_state->in_transfer_func;
1644 
1645 	if (plane_state->gamma_correction &&
1646 		!dpp_base->ctx->dc->debug.always_use_regamma
1647 		&& !plane_state->gamma_correction->is_identity
1648 			&& dce_use_lut(plane_state->format))
1649 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1650 
1651 	if (tf == NULL)
1652 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1653 	else if (tf->type == TF_TYPE_PREDEFINED) {
1654 		switch (tf->tf) {
1655 		case TRANSFER_FUNCTION_SRGB:
1656 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1657 			break;
1658 		case TRANSFER_FUNCTION_BT709:
1659 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1660 			break;
1661 		case TRANSFER_FUNCTION_LINEAR:
1662 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1663 			break;
1664 		case TRANSFER_FUNCTION_PQ:
1665 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1666 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1667 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1668 			result = true;
1669 			break;
1670 		default:
1671 			result = false;
1672 			break;
1673 		}
1674 	} else if (tf->type == TF_TYPE_BYPASS) {
1675 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1676 	} else {
1677 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1678 					&dpp_base->degamma_params);
1679 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1680 				&dpp_base->degamma_params);
1681 		result = true;
1682 	}
1683 
1684 	return result;
1685 }
1686 
1687 #define MAX_NUM_HW_POINTS 0x200
1688 
1689 static void log_tf(struct dc_context *ctx,
1690 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1691 {
1692 	// DC_LOG_GAMMA is default logging of all hw points
1693 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1694 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1695 	int i = 0;
1696 
1697 	DC_LOGGER_INIT(ctx->logger);
1698 	DC_LOG_GAMMA("Gamma Correction TF");
1699 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1700 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1701 
1702 	for (i = 0; i < hw_points_num; i++) {
1703 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1704 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1705 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1706 	}
1707 
1708 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1709 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1710 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1711 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1712 	}
1713 }
1714 
1715 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1716 				const struct dc_stream_state *stream)
1717 {
1718 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1719 
1720 	if (dpp == NULL)
1721 		return false;
1722 
1723 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1724 
1725 	if (stream->out_transfer_func &&
1726 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1727 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1728 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1729 
1730 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1731 	 * update.
1732 	 */
1733 	else if (cm_helper_translate_curve_to_hw_format(
1734 			stream->out_transfer_func,
1735 			&dpp->regamma_params, false)) {
1736 		dpp->funcs->dpp_program_regamma_pwl(
1737 				dpp,
1738 				&dpp->regamma_params, OPP_REGAMMA_USER);
1739 	} else
1740 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1741 
1742 	if (stream != NULL && stream->ctx != NULL &&
1743 			stream->out_transfer_func != NULL) {
1744 		log_tf(stream->ctx,
1745 				stream->out_transfer_func,
1746 				dpp->regamma_params.hw_points_num);
1747 	}
1748 
1749 	return true;
1750 }
1751 
1752 void dcn10_pipe_control_lock(
1753 	struct dc *dc,
1754 	struct pipe_ctx *pipe,
1755 	bool lock)
1756 {
1757 	struct dce_hwseq *hws = dc->hwseq;
1758 
1759 	/* use TG master update lock to lock everything on the TG
1760 	 * therefore only top pipe need to lock
1761 	 */
1762 	if (!pipe || pipe->top_pipe)
1763 		return;
1764 
1765 	if (dc->debug.sanity_checks)
1766 		hws->funcs.verify_allow_pstate_change_high(dc);
1767 
1768 	if (lock)
1769 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1770 	else
1771 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1772 
1773 	if (dc->debug.sanity_checks)
1774 		hws->funcs.verify_allow_pstate_change_high(dc);
1775 }
1776 
1777 /**
1778  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1779  *
1780  * Software keepout workaround to prevent cursor update locking from stalling
1781  * out cursor updates indefinitely or from old values from being retained in
1782  * the case where the viewport changes in the same frame as the cursor.
1783  *
1784  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1785  * too close to VUPDATE, then stall out until VUPDATE finishes.
1786  *
1787  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1788  *       to avoid the need for this workaround.
1789  */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are needed to locate VUPDATE relative to VPOS. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds:
	 * us/line = h_total / (pix_clk_100hz * 100Hz) * 1e6
	 *         = h_total * 10000 / pix_clk_100hz
	 */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
1841 
1842 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1843 {
1844 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1845 	if (!pipe || pipe->top_pipe)
1846 		return;
1847 
1848 	/* Prevent cursor lock from stalling out cursor updates. */
1849 	if (lock)
1850 		delay_cursor_until_vupdate(dc, pipe);
1851 
1852 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1853 		union dmub_hw_lock_flags hw_locks = { 0 };
1854 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1855 
1856 		hw_locks.bits.lock_cursor = 1;
1857 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1858 
1859 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1860 					lock,
1861 					&hw_locks,
1862 					&inst_flags);
1863 	} else
1864 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1865 				pipe->stream_res.opp->inst, lock);
1866 }
1867 
1868 static bool wait_for_reset_trigger_to_occur(
1869 	struct dc_context *dc_ctx,
1870 	struct timing_generator *tg)
1871 {
1872 	bool rc = false;
1873 
1874 	/* To avoid endless loop we wait at most
1875 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1876 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1877 	int i;
1878 
1879 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1880 
1881 		if (!tg->funcs->is_counter_moving(tg)) {
1882 			DC_ERROR("TG counter is not moving!\n");
1883 			break;
1884 		}
1885 
1886 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1887 			rc = true;
1888 			/* usually occurs at i=1 */
1889 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1890 					i);
1891 			break;
1892 		}
1893 
1894 		/* Wait for one frame. */
1895 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1896 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1897 	}
1898 
1899 	if (false == rc)
1900 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1901 
1902 	return rc;
1903 }
1904 
/*
 * reduceSizeAndFraction() - Reduce a DTO phase/modulo fraction in place by
 * dividing out common prime factors (primes up to 997).
 *
 * @numerator:          in/out DTO phase
 * @denominator:        in/out DTO modulo
 * @checkUint32Bounary: when true, stop as soon as both values fit in 32
 *                      bits and report (as a boolean) whether that bound
 *                      was reached; when false, fully reduce and always
 *                      report success.
 *
 * Return: boolean success as described above (carried in a uint64_t to
 * match the existing callers/declaration).
 */
uint64_t reduceSizeAndFraction(
	uint64_t *numerator,
	uint64_t *denominator,
	bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;

	/* 0 % p == 0 for every prime, so if both values are zero the
	 * division loop below never terminates (the 32-bit early-out is
	 * skipped when checkUint32Bounary is false).  0/0 cannot be
	 * reduced; report success since both values trivially fit.
	 */
	if (num == 0 && denom == 0)
		return true;

	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out once both values fit in 32 bits. */
		if (checkUint32Bounary &&
			num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Strip this prime while it divides both values exactly. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
1955 
1956 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1957 {
1958 	uint32_t master_pipe_refresh_rate =
1959 		pipe->stream->timing.pix_clk_100hz * 100 /
1960 		pipe->stream->timing.h_total /
1961 		pipe->stream->timing.v_total;
1962 	return master_pipe_refresh_rate <= 30;
1963 }
1964 
1965 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1966 {
1967 	uint32_t clock_divider = 1;
1968 	uint32_t numpipes = 1;
1969 
1970 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1971 		clock_divider *= 2;
1972 
1973 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
1974 		clock_divider *= 2;
1975 
1976 	while (pipe->next_odm_pipe) {
1977 		pipe = pipe->next_odm_pipe;
1978 		numpipes++;
1979 	}
1980 	clock_divider *= numpipes;
1981 
1982 	return clock_divider;
1983 }
1984 
/*
 * dcn10_align_pixel_clocks() - Override DP DTOs so grouped pipes run at
 * clocks derived from an embedded panel's timing.
 *
 * dc->config.vblank_alignment_dto_params packs the embedded timing:
 *   bit  63     - clamshell (lid) closed flag
 *   bits 48..62 - embedded v_total
 *   bits 32..46 - embedded h_total
 *   bits  0..31 - embedded pixel clock, in 100Hz units
 *
 * Return: index of the chosen master pipe in grouped_pipes[], or -1 if no
 * DTO override was performed.
 */
int dcn10_align_pixel_clocks(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	/* NOTE(review): clamshell_closed is decoded below but never read
	 * afterwards - confirm whether it should gate the override.
	 */
	bool clamshell_closed = false;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		clamshell_closed =
			(dc->config.vblank_alignment_dto_params >> 63);
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* Read back the actual HW timing and pixel clock of each pipe. */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel pipe is always the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				/* Build a phase/modulo ratio against the DP
				 * reference clock; presumably chosen so this
				 * pipe's frame rate matches the embedded
				 * panel's - confirm against override_dp_pix_clk.
				 */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Apply overrides and refresh the cached pixel clocks. */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
					grouped_pipes[i]->stream->timing.pix_clk_100hz =
						pclk*get_clock_divider(grouped_pipes[i], false);
				if (master == -1)
					master = i;
			}
		}

	}
	return master;
}
2071 
/*
 * dcn10_enable_vblanks_synchronization() - Align vblanks of grouped pipes
 * against a master pipe by retuning DP DTOs (no OTG reset).
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Enlarge the slave DPG dimensions while the timings shift;
	 * presumably keeps the blank pattern covering the screen -
	 * TODO confirm.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Start from a clean slate on every stream in the group. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			/* NOTE(review): the if below has no braces, so only
			 * the align_vblanks call is conditional; the
			 * vblank_synchronized assignment runs for every i
			 * despite its indentation - confirm intent.
			 */
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
			grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
				grouped_pipes[master]->stream_res.tg,
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[master]->stream->timing.pix_clk_100hz,
				grouped_pipes[i]->stream->timing.pix_clk_100hz,
				get_clock_divider(grouped_pipes[master], false),
				get_clock_divider(grouped_pipes[i], false));
				grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2128 
/*
 * dcn10_enable_timing_synchronization() - Reset-synchronize grouped OTGs
 * against pipe 0's OTG.
 *
 * Assumes group_size >= 2: slaves are pipes 1..group_size-1 and
 * grouped_pipes[1] is the pipe polled for trigger completion.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Enlarge the slave DPG dimensions while the timings shift;
	 * presumably keeps the blank pattern covering the screen -
	 * TODO confirm.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm every slave OTG to reset off pipe 0's OTG. */
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2182 
2183 void dcn10_enable_per_frame_crtc_position_reset(
2184 	struct dc *dc,
2185 	int group_size,
2186 	struct pipe_ctx *grouped_pipes[])
2187 {
2188 	struct dc_context *dc_ctx = dc->ctx;
2189 	int i;
2190 
2191 	DC_SYNC_INFO("Setting up\n");
2192 	for (i = 0; i < group_size; i++)
2193 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2194 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2195 					grouped_pipes[i]->stream_res.tg,
2196 					0,
2197 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2198 
2199 	DC_SYNC_INFO("Waiting for trigger\n");
2200 
2201 	for (i = 0; i < group_size; i++)
2202 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2203 
2204 	DC_SYNC_INFO("Multi-display sync is complete\n");
2205 }
2206 
/* Read the MC system aperture (default page and low/high logical bounds)
 * from the MMHUB registers so it can be mirrored into the HUBP.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Convert 4 KiB page number to byte address. */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* Logical addresses appear to be in 256 KiB units (18-bit shift) -
	 * TODO confirm against the register spec.
	 */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2230 
/* Temporary read settings, future will get values from kmd directly.
 * Reads the VM context0 page-table base/start/end and fault-default
 * addresses from the MMHUB registers and rebases them for DCN use.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	/* Each 64-bit value is split across a HI/LO register pair. */
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* FB base/offset register values appear to be in 16 MiB units
	 * (24-bit shift) - TODO confirm against the register spec.
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2275 
2276 
/* Mirror the current VM system aperture and context0 page-table settings
 * from the MMHUB registers into the given HUBP.
 */
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = { {{ 0 } } };
	struct vm_context0_param vm0 = { { { 0 } } };

	/* Snapshot the register state... */
	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	/* ...then program it into the HUBP. */
	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}
2289 
/* Power up and clock the HUBP/OPP path for a plane before it is programmed. */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10_253 workaround before bringing a plane up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror VM aperture/page-table settings into the HUBP when a GPU
	 * VM is in use.
	 */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Re-arm the flip interrupt on the top pipe if the plane wants it. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2328 
2329 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2330 {
2331 	int i = 0;
2332 	struct dpp_grph_csc_adjustment adjust;
2333 	memset(&adjust, 0, sizeof(adjust));
2334 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2335 
2336 
2337 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2338 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2339 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2340 			adjust.temperature_matrix[i] =
2341 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2342 	} else if (pipe_ctx->plane_state &&
2343 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2344 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2345 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2346 			adjust.temperature_matrix[i] =
2347 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2348 	}
2349 
2350 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2351 }
2352 
2353 
2354 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2355 {
2356 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2357 		if (pipe_ctx->top_pipe) {
2358 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2359 
2360 			while (top->top_pipe)
2361 				top = top->top_pipe; // Traverse to top pipe_ctx
2362 			if (top->plane_state && top->plane_state->layer_index == 0)
2363 				return true; // Front MPO plane not hidden
2364 		}
2365 	}
2366 	return false;
2367 }
2368 
/* Program the output CSC with the RGB bias entries zeroed, then restore the
 * caller's matrix.  This is the rear-plane half of the DCN1 MPO brightness
 * fix: MPC blending adds front + rear biases, so the rear plane must not
 * contribute its own.
 *
 * NOTE: matrix[3], matrix[7] and matrix[11] are assumed to hold the same
 * bias value (see the caller's comment); only matrix[3] is saved and used
 * to restore all three.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}
2382 
/* Program the output CSC for this pipe: either the caller-supplied matrix
 * (with the DCN1 rear-plane MPO workaround when needed) or the colorspace
 * default.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			/* Reinterpret the bias entry as signed so negative
			 * offsets skip the workaround below.
			 */
			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}
2414 
/* Program the DPP input stage for a plane: input CSC plus prescale
 * bias/scale derived from the plane state.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers (optional hook on some DPP revisions)
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}
2432 
2433 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2434 {
2435 	struct mpc *mpc = dc->res_pool->mpc;
2436 
2437 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2438 		get_hdr_visual_confirm_color(pipe_ctx, color);
2439 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2440 		get_surface_visual_confirm_color(pipe_ctx, color);
2441 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2442 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2443 	else
2444 		color_space_to_black_color(
2445 				dc, pipe_ctx->stream->output_color_space, color);
2446 
2447 	if (mpc->funcs->set_bg_color)
2448 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2449 }
2450 
/* Build this pipe's blend configuration and insert (or update) its plane in
 * the MPC blending tree.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {{0}};
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	/* Per-pixel alpha only applies when there is a pipe below to blend
	 * with.
	 */
	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;


	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);

	/* Record the routing so later updates find the same MPCC. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2524 
/* Push the precomputed scaler configuration for this pipe into the DPP. */
static void update_scaler(struct pipe_ctx *pipe_ctx)
{
	bool per_pixel_alpha =
			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;

	/* Line-buffer alpha only matters when blending with a pipe below. */
	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
	/* scaler configuration */
	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
2536 
/* Program HUBP and DPP for this pipe's plane - DPP clock, DLG/TTU/RQ regs,
 * input CSC, MPC blending, scaler, viewport, cursor, gamut/output CSC and
 * surface config - each step gated by the plane's update_flags.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The surface is fetched at viewport size, not full plane size. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram the cursor if the stream has one. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		/* compat_level is always 0 here - assumed unused on DCN10,
		 * TODO confirm against hubp_program_surface_config.
		 */
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2698 
/*
 * Blank or unblank the pixel data path of a pipe via its timing generator,
 * keeping ABM (adaptive backlight) state consistent with the blank state.
 *
 * Unblank path: unblank the OTG first, then restore the stream's ABM level.
 * Blank path: disable ABM immediately, wait for VBLANK, then blank - so the
 * blank takes effect on a frame boundary.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then re-apply the stream's ABM level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else if (blank) {
		/* NOTE(review): "else if (blank)" is redundant here - a plain
		 * "else" would be equivalent since blank is a bool.
		 */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			/* Enter blank only after the OTG reaches VBLANK. */
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2742 
2743 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2744 {
2745 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2746 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2747 	struct custom_float_format fmt;
2748 
2749 	fmt.exponenta_bits = 6;
2750 	fmt.mantissa_bits = 12;
2751 	fmt.sign = true;
2752 
2753 
2754 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2755 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2756 
2757 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2758 			pipe_ctx->plane_res.dpp, hw_mult);
2759 }
2760 
/*
 * Program one pipe for the given state: timing-generator global sync and
 * VTG on the OTG master pipe, then the HUBP/DPP front end, HDR multiplier
 * and transfer functions for the attached plane.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the top (OTG master) pipe programs timing-generator state. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		/* Blank pixel data when no plane in the tree is visible. */
		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	/* NOTE(review): pipe_ctx->plane_state is dereferenced without a NULL
	 * check below - callers must only pass pipes with a plane attached.
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* Input (degamma) reprogramming is needed for gamma/transfer changes. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2808 
2809 void dcn10_wait_for_pending_cleared(struct dc *dc,
2810 		struct dc_state *context)
2811 {
2812 		struct pipe_ctx *pipe_ctx;
2813 		struct timing_generator *tg;
2814 		int i;
2815 
2816 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2817 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2818 			tg = pipe_ctx->stream_res.tg;
2819 
2820 			/*
2821 			 * Only wait for top pipe's tg penindg bit
2822 			 * Also skip if pipe is disabled.
2823 			 */
2824 			if (pipe_ctx->top_pipe ||
2825 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2826 			    !tg->funcs->is_tg_enabled(tg))
2827 				continue;
2828 
2829 			/*
2830 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2831 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2832 			 * seems to not trigger the update right away, and if we
2833 			 * lock again before VUPDATE then we don't get a separated
2834 			 * operation.
2835 			 */
2836 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2837 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2838 		}
2839 }
2840 
2841 void dcn10_post_unlock_program_front_end(
2842 		struct dc *dc,
2843 		struct dc_state *context)
2844 {
2845 	int i;
2846 
2847 	DC_LOGGER_INIT(dc->ctx->logger);
2848 
2849 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2850 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2851 
2852 		if (!pipe_ctx->top_pipe &&
2853 			!pipe_ctx->prev_odm_pipe &&
2854 			pipe_ctx->stream) {
2855 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
2856 
2857 			if (context->stream_status[i].plane_count == 0)
2858 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2859 		}
2860 	}
2861 
2862 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2863 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2864 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2865 
2866 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2867 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2868 			dc->hwss.optimize_bandwidth(dc, context);
2869 			break;
2870 		}
2871 
2872 	if (dc->hwseq->wa.DEGVIDCN10_254)
2873 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2874 }
2875 
2876 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2877 {
2878 	uint8_t i;
2879 
2880 	for (i = 0; i < context->stream_count; i++) {
2881 		if (context->streams[i]->timing.timing_3d_format
2882 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2883 			/*
2884 			 * Disable stutter
2885 			 */
2886 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2887 			break;
2888 		}
2889 	}
2890 }
2891 
2892 void dcn10_prepare_bandwidth(
2893 		struct dc *dc,
2894 		struct dc_state *context)
2895 {
2896 	struct dce_hwseq *hws = dc->hwseq;
2897 	struct hubbub *hubbub = dc->res_pool->hubbub;
2898 
2899 	if (dc->debug.sanity_checks)
2900 		hws->funcs.verify_allow_pstate_change_high(dc);
2901 
2902 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2903 		if (context->stream_count == 0)
2904 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2905 
2906 		dc->clk_mgr->funcs->update_clocks(
2907 				dc->clk_mgr,
2908 				context,
2909 				false);
2910 	}
2911 
2912 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2913 			&context->bw_ctx.bw.dcn.watermarks,
2914 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2915 			true);
2916 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2917 
2918 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2919 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2920 
2921 	if (dc->debug.sanity_checks)
2922 		hws->funcs.verify_allow_pstate_change_high(dc);
2923 }
2924 
2925 void dcn10_optimize_bandwidth(
2926 		struct dc *dc,
2927 		struct dc_state *context)
2928 {
2929 	struct dce_hwseq *hws = dc->hwseq;
2930 	struct hubbub *hubbub = dc->res_pool->hubbub;
2931 
2932 	if (dc->debug.sanity_checks)
2933 		hws->funcs.verify_allow_pstate_change_high(dc);
2934 
2935 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2936 		if (context->stream_count == 0)
2937 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2938 
2939 		dc->clk_mgr->funcs->update_clocks(
2940 				dc->clk_mgr,
2941 				context,
2942 				true);
2943 	}
2944 
2945 	hubbub->funcs->program_watermarks(hubbub,
2946 			&context->bw_ctx.bw.dcn.watermarks,
2947 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2948 			true);
2949 
2950 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2951 
2952 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2953 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2954 
2955 	if (dc->debug.sanity_checks)
2956 		hws->funcs.verify_allow_pstate_change_high(dc);
2957 }
2958 
2959 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2960 		int num_pipes, struct dc_crtc_timing_adjust adjust)
2961 {
2962 	int i = 0;
2963 	struct drr_params params = {0};
2964 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2965 	unsigned int event_triggers = 0x800;
2966 	// Note DRR trigger events are generated regardless of whether num frames met.
2967 	unsigned int num_frames = 2;
2968 
2969 	params.vertical_total_max = adjust.v_total_max;
2970 	params.vertical_total_min = adjust.v_total_min;
2971 	params.vertical_total_mid = adjust.v_total_mid;
2972 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
2973 	/* TODO: If multiple pipes are to be supported, you need
2974 	 * some GSL stuff. Static screen triggers may be programmed differently
2975 	 * as well.
2976 	 */
2977 	for (i = 0; i < num_pipes; i++) {
2978 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2979 			pipe_ctx[i]->stream_res.tg, &params);
2980 		if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
2981 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2982 					pipe_ctx[i]->stream_res.tg,
2983 					event_triggers, num_frames);
2984 	}
2985 }
2986 
2987 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2988 		int num_pipes,
2989 		struct crtc_position *position)
2990 {
2991 	int i = 0;
2992 
2993 	/* TODO: handle pipes > 1
2994 	 */
2995 	for (i = 0; i < num_pipes; i++)
2996 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2997 }
2998 
2999 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3000 		int num_pipes, const struct dc_static_screen_params *params)
3001 {
3002 	unsigned int i;
3003 	unsigned int triggers = 0;
3004 
3005 	if (params->triggers.surface_update)
3006 		triggers |= 0x80;
3007 	if (params->triggers.cursor_update)
3008 		triggers |= 0x2;
3009 	if (params->triggers.force_trigger)
3010 		triggers |= 0x1;
3011 
3012 	for (i = 0; i < num_pipes; i++)
3013 		pipe_ctx[i]->stream_res.tg->funcs->
3014 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3015 					triggers, params->num_frames);
3016 }
3017 
3018 static void dcn10_config_stereo_parameters(
3019 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3020 {
3021 	enum view_3d_format view_format = stream->view_format;
3022 	enum dc_timing_3d_format timing_3d_format =\
3023 			stream->timing.timing_3d_format;
3024 	bool non_stereo_timing = false;
3025 
3026 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3027 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3028 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3029 		non_stereo_timing = true;
3030 
3031 	if (non_stereo_timing == false &&
3032 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3033 
3034 		flags->PROGRAM_STEREO         = 1;
3035 		flags->PROGRAM_POLARITY       = 1;
3036 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3037 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3038 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3039 			enum display_dongle_type dongle = \
3040 					stream->link->ddc->dongle_type;
3041 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3042 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3043 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3044 				flags->DISABLE_STEREO_DP_SYNC = 1;
3045 		}
3046 		flags->RIGHT_EYE_POLARITY =\
3047 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3048 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3049 			flags->FRAME_PACKED = 1;
3050 	}
3051 
3052 	return;
3053 }
3054 
3055 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3056 {
3057 	struct crtc_stereo_flags flags = { 0 };
3058 	struct dc_stream_state *stream = pipe_ctx->stream;
3059 
3060 	dcn10_config_stereo_parameters(stream, &flags);
3061 
3062 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3063 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3064 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3065 	} else {
3066 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3067 	}
3068 
3069 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3070 		pipe_ctx->stream_res.opp,
3071 		flags.PROGRAM_STEREO == 1,
3072 		&stream->timing);
3073 
3074 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3075 		pipe_ctx->stream_res.tg,
3076 		&stream->timing,
3077 		&flags);
3078 
3079 	return;
3080 }
3081 
3082 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3083 {
3084 	int i;
3085 
3086 	for (i = 0; i < res_pool->pipe_count; i++) {
3087 		if (res_pool->hubps[i]->inst == mpcc_inst)
3088 			return res_pool->hubps[i];
3089 	}
3090 	ASSERT(false);
3091 	return NULL;
3092 }
3093 
3094 void dcn10_wait_for_mpcc_disconnect(
3095 		struct dc *dc,
3096 		struct resource_pool *res_pool,
3097 		struct pipe_ctx *pipe_ctx)
3098 {
3099 	struct dce_hwseq *hws = dc->hwseq;
3100 	int mpcc_inst;
3101 
3102 	if (dc->debug.sanity_checks) {
3103 		hws->funcs.verify_allow_pstate_change_high(dc);
3104 	}
3105 
3106 	if (!pipe_ctx->stream_res.opp)
3107 		return;
3108 
3109 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3110 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3111 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3112 
3113 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3114 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3115 			hubp->funcs->set_blank(hubp, true);
3116 		}
3117 	}
3118 
3119 	if (dc->debug.sanity_checks) {
3120 		hws->funcs.verify_allow_pstate_change_high(dc);
3121 	}
3122 
3123 }
3124 
/*
 * Stub display power-gating handler: performs no hardware programming and
 * unconditionally reports success. All parameters are ignored.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3133 
3134 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3135 {
3136 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3137 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3138 	bool flip_pending;
3139 	struct dc *dc = plane_state->ctx->dc;
3140 
3141 	if (plane_state == NULL)
3142 		return;
3143 
3144 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3145 					pipe_ctx->plane_res.hubp);
3146 
3147 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3148 
3149 	if (!flip_pending)
3150 		plane_state->status.current_address = plane_state->status.requested_address;
3151 
3152 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3153 			tg->funcs->is_stereo_left_eye) {
3154 		plane_state->status.is_right_eye =
3155 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3156 	}
3157 
3158 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3159 		struct dce_hwseq *hwseq = dc->hwseq;
3160 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3161 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3162 
3163 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3164 			struct hubbub *hubbub = dc->res_pool->hubbub;
3165 
3166 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3167 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3168 		}
3169 	}
3170 }
3171 
3172 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3173 {
3174 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3175 
3176 	/* In DCN, this programming sequence is owned by the hubbub */
3177 	hubbub->funcs->update_dchub(hubbub, dh_data);
3178 }
3179 
3180 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3181 {
3182 	struct pipe_ctx *test_pipe;
3183 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3184 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3185 
3186 	/**
3187 	 * Disable the cursor if there's another pipe above this with a
3188 	 * plane that contains this pipe's viewport to prevent double cursor
3189 	 * and incorrect scaling artifacts.
3190 	 */
3191 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3192 	     test_pipe = test_pipe->top_pipe) {
3193 		if (!test_pipe->plane_state->visible)
3194 			continue;
3195 
3196 		r2 = &test_pipe->plane_res.scl_data.recout;
3197 		r2_r = r2->x + r2->width;
3198 		r2_b = r2->y + r2->height;
3199 
3200 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3201 			return true;
3202 	}
3203 
3204 	return false;
3205 }
3206 
/*
 * Program the hardware cursor position for a pipe.
 *
 * Converts the stream-space cursor position into plane space (accounting
 * for plane scaling, source-rect translation, rotation and mirroring, and
 * pipe-split/ODM-combine layouts), then programs both HUBP and DPP.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced without a NULL
 * check - callers must only invoke this on pipes with a plane attached.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* For 90/270 rotation, src width maps to dst height and
		 * vice versa, so the scale factors swap axes.
		 */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Video-progressive planes never show the HW cursor. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* Hide the cursor if a pipe above fully covers this one. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			/* Pick the sibling pipe's viewport.y: bottom/top for
			 * pipe split, next/prev for ODM combine.
			 */
			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* Use the smaller of the two viewport.y values. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				/* Cursor is in the right half of the combined
				 * surface: mirror within that half.
				 */
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				/* Keep the cursor inside this pipe when the
				 * mirrored position would leave its viewport.
				 */
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	/* Both HUBP and DPP must receive the final position. */
	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3408 
3409 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3410 {
3411 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3412 
3413 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3414 			pipe_ctx->plane_res.hubp, attributes);
3415 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3416 		pipe_ctx->plane_res.dpp, attributes);
3417 }
3418 
3419 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3420 {
3421 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3422 	struct fixed31_32 multiplier;
3423 	struct dpp_cursor_attributes opt_attr = { 0 };
3424 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3425 	struct custom_float_format fmt;
3426 
3427 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3428 		return;
3429 
3430 	fmt.exponenta_bits = 5;
3431 	fmt.mantissa_bits = 10;
3432 	fmt.sign = true;
3433 
3434 	if (sdr_white_level > 80) {
3435 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3436 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3437 	}
3438 
3439 	opt_attr.scale = hw_scale;
3440 	opt_attr.bias = 0;
3441 
3442 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3443 			pipe_ctx->plane_res.dpp, &opt_attr);
3444 }
3445 
3446 /*
3447  * apply_front_porch_workaround  TODO FPGA still need?
3448  *
3449  * This is a workaround for a bug that has existed since R5xx and has not been
3450  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3451  */
3452 static void apply_front_porch_workaround(
3453 	struct dc_crtc_timing *timing)
3454 {
3455 	if (timing->flags.INTERLACE == 1) {
3456 		if (timing->v_front_porch < 2)
3457 			timing->v_front_porch = 2;
3458 	} else {
3459 		if (timing->v_front_porch < 1)
3460 			timing->v_front_porch = 1;
3461 	}
3462 }
3463 
3464 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3465 {
3466 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3467 	struct dc_crtc_timing patched_crtc_timing;
3468 	int vesa_sync_start;
3469 	int asic_blank_end;
3470 	int interlace_factor;
3471 	int vertical_line_start;
3472 
3473 	patched_crtc_timing = *dc_crtc_timing;
3474 	apply_front_porch_workaround(&patched_crtc_timing);
3475 
3476 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3477 
3478 	vesa_sync_start = patched_crtc_timing.v_addressable +
3479 			patched_crtc_timing.v_border_bottom +
3480 			patched_crtc_timing.v_front_porch;
3481 
3482 	asic_blank_end = (patched_crtc_timing.v_total -
3483 			vesa_sync_start -
3484 			patched_crtc_timing.v_border_top)
3485 			* interlace_factor;
3486 
3487 	vertical_line_start = asic_blank_end -
3488 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3489 
3490 	return vertical_line_start;
3491 }
3492 
3493 void dcn10_calc_vupdate_position(
3494 		struct dc *dc,
3495 		struct pipe_ctx *pipe_ctx,
3496 		uint32_t *start_line,
3497 		uint32_t *end_line)
3498 {
3499 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3500 	int vline_int_offset_from_vupdate =
3501 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3502 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3503 	int start_position;
3504 
3505 	if (vline_int_offset_from_vupdate > 0)
3506 		vline_int_offset_from_vupdate--;
3507 	else if (vline_int_offset_from_vupdate < 0)
3508 		vline_int_offset_from_vupdate++;
3509 
3510 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3511 
3512 	if (start_position >= 0)
3513 		*start_line = start_position;
3514 	else
3515 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3516 
3517 	*end_line = *start_line + 2;
3518 
3519 	if (*end_line >= dc_crtc_timing->v_total)
3520 		*end_line = 2;
3521 }
3522 
3523 static void dcn10_cal_vline_position(
3524 		struct dc *dc,
3525 		struct pipe_ctx *pipe_ctx,
3526 		enum vline_select vline,
3527 		uint32_t *start_line,
3528 		uint32_t *end_line)
3529 {
3530 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3531 
3532 	if (vline == VLINE0)
3533 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3534 	else if (vline == VLINE1)
3535 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3536 
3537 	switch (ref_point) {
3538 	case START_V_UPDATE:
3539 		dcn10_calc_vupdate_position(
3540 				dc,
3541 				pipe_ctx,
3542 				start_line,
3543 				end_line);
3544 		break;
3545 	case START_V_SYNC:
3546 		// Suppose to do nothing because vsync is 0;
3547 		break;
3548 	default:
3549 		ASSERT(0);
3550 		break;
3551 	}
3552 }
3553 
3554 void dcn10_setup_periodic_interrupt(
3555 		struct dc *dc,
3556 		struct pipe_ctx *pipe_ctx,
3557 		enum vline_select vline)
3558 {
3559 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3560 
3561 	if (vline == VLINE0) {
3562 		uint32_t start_line = 0;
3563 		uint32_t end_line = 0;
3564 
3565 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3566 
3567 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3568 
3569 	} else if (vline == VLINE1) {
3570 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3571 				tg,
3572 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3573 	}
3574 }
3575 
3576 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3577 {
3578 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3579 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3580 
3581 	if (start_line < 0) {
3582 		ASSERT(0);
3583 		start_line = 0;
3584 	}
3585 
3586 	if (tg->funcs->setup_vertical_interrupt2)
3587 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3588 }
3589 
3590 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3591 		struct dc_link_settings *link_settings)
3592 {
3593 	struct encoder_unblank_param params = { { 0 } };
3594 	struct dc_stream_state *stream = pipe_ctx->stream;
3595 	struct dc_link *link = stream->link;
3596 	struct dce_hwseq *hws = link->dc->hwseq;
3597 
3598 	/* only 3 items below are used by unblank */
3599 	params.timing = pipe_ctx->stream->timing;
3600 
3601 	params.link_settings.link_rate = link_settings->link_rate;
3602 
3603 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3604 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3605 			params.timing.pix_clk_100hz /= 2;
3606 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3607 	}
3608 
3609 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3610 		hws->funcs.edp_backlight_control(link, true);
3611 	}
3612 }
3613 
3614 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3615 				const uint8_t *custom_sdp_message,
3616 				unsigned int sdp_message_size)
3617 {
3618 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3619 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3620 				pipe_ctx->stream_res.stream_enc,
3621 				custom_sdp_message,
3622 				sdp_message_size);
3623 	}
3624 }
3625 enum dc_status dcn10_set_clock(struct dc *dc,
3626 			enum dc_clock_type clock_type,
3627 			uint32_t clk_khz,
3628 			uint32_t stepping)
3629 {
3630 	struct dc_state *context = dc->current_state;
3631 	struct dc_clock_config clock_cfg = {0};
3632 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3633 
3634 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3635 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3636 						context, clock_type, &clock_cfg);
3637 
3638 	if (!dc->clk_mgr->funcs->get_clock)
3639 		return DC_FAIL_UNSUPPORTED_1;
3640 
3641 	if (clk_khz > clock_cfg.max_clock_khz)
3642 		return DC_FAIL_CLK_EXCEED_MAX;
3643 
3644 	if (clk_khz < clock_cfg.min_clock_khz)
3645 		return DC_FAIL_CLK_BELOW_MIN;
3646 
3647 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3648 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3649 
3650 	/*update internal request clock for update clock use*/
3651 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3652 		current_clocks->dispclk_khz = clk_khz;
3653 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3654 		current_clocks->dppclk_khz = clk_khz;
3655 	else
3656 		return DC_ERROR_UNEXPECTED;
3657 
3658 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3659 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3660 				context, true);
3661 	return DC_OK;
3662 
3663 }
3664 
3665 void dcn10_get_clock(struct dc *dc,
3666 			enum dc_clock_type clock_type,
3667 			struct dc_clock_config *clock_cfg)
3668 {
3669 	struct dc_state *context = dc->current_state;
3670 
3671 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3672 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3673 
3674 }
3675 
3676 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3677 {
3678 	struct resource_pool *pool = dc->res_pool;
3679 	int i;
3680 
3681 	for (i = 0; i < pool->pipe_count; i++) {
3682 		struct hubp *hubp = pool->hubps[i];
3683 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3684 
3685 		hubp->funcs->hubp_read_state(hubp);
3686 
3687 		if (!s->blank_en)
3688 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3689 	}
3690 }
3691