1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dc_trace.h"
58 #include "dce/dmub_outbox.h"
59 #include "inc/dc_link_dp.h"
60 #include "inc/link_dpcd.h"
61 
62 #define DC_LOGGER_INIT(logger)
63 
64 #define CTX \
65 	hws->ctx
66 #define REG(reg)\
67 	hws->regs->reg
68 
69 #undef FN
70 #define FN(reg_name, field_name) \
71 	hws->shifts->field_name, hws->masks->field_name
72 
/* the printed field is 17 characters wide; the first two characters are spaces */
74 #define DTN_INFO_MICRO_SEC(ref_cycle) \
75 	print_microsec(dc_ctx, log_ctx, ref_cycle)
76 
77 #define GAMMA_HW_POINTS_NUM 256
78 
79 #define PGFSM_POWER_ON 0
80 #define PGFSM_POWER_OFF 2
81 
82 static void print_microsec(struct dc_context *dc_ctx,
83 			   struct dc_log_buffer_ctx *log_ctx,
84 			   uint32_t ref_cycle)
85 {
86 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
87 	static const unsigned int frac = 1000;
	uint32_t us_x1000 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d",
			us_x1000 / frac,
			us_x1000 % frac);
93 }
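
/*
 * Worked example (illustrative numbers, not tied to any specific ASIC): with
 * a hypothetical 400 MHz DCHUB reference clock, ref_clk_mhz = 400000 / 1000 =
 * 400, so ref_cycle = 5000 yields (5000 * 1000) / 400 = 12500 and prints as
 * "12.500" microseconds, i.e. three decimal places.
 */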
94 
95 void dcn10_lock_all_pipes(struct dc *dc,
96 	struct dc_state *context,
97 	bool lock)
98 {
99 	struct pipe_ctx *pipe_ctx;
100 	struct pipe_ctx *old_pipe_ctx;
101 	struct timing_generator *tg;
102 	int i;
103 
104 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
105 		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
106 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
107 		tg = pipe_ctx->stream_res.tg;
108 
109 		/*
110 		 * Only lock the top pipe's tg to prevent redundant
111 		 * (un)locking. Also skip if pipe is disabled.
112 		 */
113 		if (pipe_ctx->top_pipe ||
114 		    !pipe_ctx->stream ||
115 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
116 		    !tg->funcs->is_tg_enabled(tg))
117 			continue;
118 
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
123 	}
124 }
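
/*
 * Usage sketch (illustrative only, not a specific call site in this file):
 * callers typically bracket programming that must take effect atomically
 * across pipes, e.g.:
 *
 *	dcn10_lock_all_pipes(dc, context, true);
 *	...program the pipes that must update together...
 *	dcn10_lock_all_pipes(dc, context, false);
 *
 * Per the checks above, only top pipes with a stream, a plane state, and an
 * enabled TG are (un)locked.
 */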
125 
126 static void log_mpc_crc(struct dc *dc,
127 	struct dc_log_buffer_ctx *log_ctx)
128 {
129 	struct dc_context *dc_ctx = dc->ctx;
130 	struct dce_hwseq *hws = dc->hwseq;
131 
132 	if (REG(MPC_CRC_RESULT_GB))
133 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
134 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
135 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
136 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
137 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
138 }
139 
140 static void dcn10_log_hubbub_state(struct dc *dc,
141 				   struct dc_log_buffer_ctx *log_ctx)
142 {
143 	struct dc_context *dc_ctx = dc->ctx;
144 	struct dcn_hubbub_wm wm;
145 	int i;
146 
147 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
148 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
149 
150 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
151 			"         sr_enter          sr_exit  dram_clk_change\n");
152 
153 	for (i = 0; i < 4; i++) {
154 		struct dcn_hubbub_wm_set *s;
155 
156 		s = &wm.sets[i];
157 		DTN_INFO("WM_Set[%d]:", s->wm_set);
158 		DTN_INFO_MICRO_SEC(s->data_urgent);
159 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
160 		DTN_INFO_MICRO_SEC(s->sr_enter);
161 		DTN_INFO_MICRO_SEC(s->sr_exit);
162 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
163 		DTN_INFO("\n");
164 	}
165 
166 	DTN_INFO("\n");
167 }
168 
169 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
170 {
171 	struct dc_context *dc_ctx = dc->ctx;
172 	struct resource_pool *pool = dc->res_pool;
173 	int i;
174 
175 	DTN_INFO(
176 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
177 	for (i = 0; i < pool->pipe_count; i++) {
178 		struct hubp *hubp = pool->hubps[i];
179 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
180 
181 		hubp->funcs->hubp_read_state(hubp);
182 
183 		if (!s->blank_en) {
184 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
185 					hubp->inst,
186 					s->pixel_format,
187 					s->inuse_addr_hi,
188 					s->viewport_width,
189 					s->viewport_height,
190 					s->rotation_angle,
191 					s->h_mirror_en,
192 					s->sw_mode,
193 					s->dcc_en,
194 					s->blank_en,
195 					s->clock_en,
196 					s->ttu_disable,
197 					s->underflow_status);
198 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
199 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
200 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
201 			DTN_INFO("\n");
202 		}
203 	}
204 
205 	DTN_INFO("\n=========RQ========\n");
206 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
207 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
208 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
209 	for (i = 0; i < pool->pipe_count; i++) {
210 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
211 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
212 
213 		if (!s->blank_en)
214 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
215 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
216 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
217 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
218 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
219 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
220 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
221 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
222 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
223 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
224 	}
225 
226 	DTN_INFO("========DLG========\n");
227 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
228 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
229 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
230 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
231 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
232 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
233 			"  x_rp_dlay  x_rr_sfl\n");
234 	for (i = 0; i < pool->pipe_count; i++) {
235 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
236 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
237 
238 		if (!s->blank_en)
239 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
240 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
241 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
242 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
243 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
244 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
245 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
246 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
247 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
248 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
249 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
250 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
251 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
252 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
253 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
254 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
255 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
256 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
257 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
258 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
259 				dlg_regs->xfc_reg_remote_surface_flip_latency);
260 	}
261 
262 	DTN_INFO("========TTU========\n");
263 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
264 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
265 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
266 	for (i = 0; i < pool->pipe_count; i++) {
267 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
268 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
269 
270 		if (!s->blank_en)
271 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
272 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
273 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
274 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
275 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
276 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
277 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
278 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
279 	}
280 	DTN_INFO("\n");
281 }
282 
283 void dcn10_log_hw_state(struct dc *dc,
284 	struct dc_log_buffer_ctx *log_ctx)
285 {
286 	struct dc_context *dc_ctx = dc->ctx;
287 	struct resource_pool *pool = dc->res_pool;
288 	int i;
289 
290 	DTN_INFO_BEGIN();
291 
292 	dcn10_log_hubbub_state(dc, log_ctx);
293 
294 	dcn10_log_hubp_states(dc, log_ctx);
295 
296 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
297 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
298 			"C31 C32   C33 C34\n");
299 	for (i = 0; i < pool->pipe_count; i++) {
300 		struct dpp *dpp = pool->dpps[i];
301 		struct dcn_dpp_state s = {0};
302 
303 		dpp->funcs->dpp_read_state(dpp, &s);
304 
305 		if (!s.is_enabled)
306 			continue;
307 
308 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
309 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
310 				dpp->inst,
311 				s.igam_input_format,
312 				(s.igam_lut_mode == 0) ? "BypassFixed" :
313 					((s.igam_lut_mode == 1) ? "BypassFloat" :
314 					((s.igam_lut_mode == 2) ? "RAM" :
315 					((s.igam_lut_mode == 3) ? "RAM" :
316 								 "Unknown"))),
317 				(s.dgam_lut_mode == 0) ? "Bypass" :
318 					((s.dgam_lut_mode == 1) ? "sRGB" :
319 					((s.dgam_lut_mode == 2) ? "Ycc" :
320 					((s.dgam_lut_mode == 3) ? "RAM" :
321 					((s.dgam_lut_mode == 4) ? "RAM" :
322 								 "Unknown")))),
323 				(s.rgam_lut_mode == 0) ? "Bypass" :
324 					((s.rgam_lut_mode == 1) ? "sRGB" :
325 					((s.rgam_lut_mode == 2) ? "Ycc" :
326 					((s.rgam_lut_mode == 3) ? "RAM" :
327 					((s.rgam_lut_mode == 4) ? "RAM" :
328 								 "Unknown")))),
329 				s.gamut_remap_mode,
330 				s.gamut_remap_c11_c12,
331 				s.gamut_remap_c13_c14,
332 				s.gamut_remap_c21_c22,
333 				s.gamut_remap_c23_c24,
334 				s.gamut_remap_c31_c32,
335 				s.gamut_remap_c33_c34);
336 		DTN_INFO("\n");
337 	}
338 	DTN_INFO("\n");
339 
340 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
341 	for (i = 0; i < pool->pipe_count; i++) {
342 		struct mpcc_state s = {0};
343 
344 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
345 		if (s.opp_id != 0xf)
346 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
347 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
348 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
349 				s.idle);
350 	}
351 	DTN_INFO("\n");
352 
353 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
354 
355 	for (i = 0; i < pool->timing_generator_count; i++) {
356 		struct timing_generator *tg = pool->timing_generators[i];
357 		struct dcn_otg_state s = {0};
358 		/* Read shared OTG state registers for all DCNx */
359 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
360 
361 		/*
362 		 * For DCN2 and greater, a register on the OPP is used to
363 		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if it exists; otherwise fall back on the OTG.
365 		 *
366 		 * TODO: Implement DCN-specific read_otg_state hooks.
367 		 */
368 		if (pool->opps[i]->funcs->dpg_is_blanked)
369 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
370 		else
371 			s.blank_enabled = tg->funcs->is_blanked(tg);
372 
373 		//only print if OTG master is enabled
374 		if ((s.otg_enabled & 1) == 0)
375 			continue;
376 
377 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
378 				tg->inst,
379 				s.v_blank_start,
380 				s.v_blank_end,
381 				s.v_sync_a_start,
382 				s.v_sync_a_end,
383 				s.v_sync_a_pol,
384 				s.v_total_max,
385 				s.v_total_min,
386 				s.v_total_max_sel,
387 				s.v_total_min_sel,
388 				s.h_blank_start,
389 				s.h_blank_end,
390 				s.h_sync_a_start,
391 				s.h_sync_a_end,
392 				s.h_sync_a_pol,
393 				s.h_total,
394 				s.v_total,
395 				s.underflow_occurred_status,
396 				s.blank_enabled);
397 
		// Clear underflow for debug purposes.
		// We want to keep the underflow sticky bit on for longevity tests outside of the test environment.
		// This function is only called from a Windows or Diags test environment, so it is safe to clear
		// it here without affecting the original intent.
402 		tg->funcs->clear_optc_underflow(tg);
403 	}
404 	DTN_INFO("\n");
405 
406 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
407 	// TODO: Update golden log header to reflect this name change
408 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
409 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
410 		struct display_stream_compressor *dsc = pool->dscs[i];
411 		struct dcn_dsc_state s = {0};
412 
413 		dsc->funcs->dsc_read_state(dsc, &s);
414 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			dsc->inst,
416 			s.dsc_clock_en,
417 			s.dsc_slice_width,
418 			s.dsc_bits_per_pixel);
419 		DTN_INFO("\n");
420 	}
421 	DTN_INFO("\n");
422 
423 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
424 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
425 	for (i = 0; i < pool->stream_enc_count; i++) {
426 		struct stream_encoder *enc = pool->stream_enc[i];
427 		struct enc_state s = {0};
428 
429 		if (enc->funcs->enc_read_state) {
430 			enc->funcs->enc_read_state(enc, &s);
431 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
432 				enc->id,
433 				s.dsc_mode,
434 				s.sec_gsp_pps_line_num,
435 				s.vbid6_line_reference,
436 				s.vbid6_line_num,
437 				s.sec_gsp_pps_enable,
438 				s.sec_stream_enable);
439 			DTN_INFO("\n");
440 		}
441 	}
442 	DTN_INFO("\n");
443 
444 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
445 	for (i = 0; i < dc->link_count; i++) {
446 		struct link_encoder *lenc = dc->links[i]->link_enc;
447 
448 		struct link_enc_state s = {0};
449 
450 		if (lenc && lenc->funcs->read_state) {
451 			lenc->funcs->read_state(lenc, &s);
452 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
453 				i,
454 				s.dphy_fec_en,
455 				s.dphy_fec_ready_shadow,
456 				s.dphy_fec_active_status,
457 				s.dp_link_training_complete);
458 			DTN_INFO("\n");
459 		}
460 	}
461 	DTN_INFO("\n");
462 
463 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
464 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
465 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
466 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
467 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
468 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
469 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
470 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
471 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
472 
473 	log_mpc_crc(dc, log_ctx);
474 
475 	{
476 		if (pool->hpo_dp_stream_enc_count > 0) {
477 			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
478 			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
479 				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
480 				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
481 
482 				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
483 					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
484 
485 					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
486 							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
487 							hpo_dp_se_state.stream_enc_enabled,
488 							hpo_dp_se_state.otg_inst,
489 							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
490 									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
491 									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
492 							(hpo_dp_se_state.component_depth == 0) ? 6 :
493 									((hpo_dp_se_state.component_depth == 1) ? 8 :
494 									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
495 							hpo_dp_se_state.vid_stream_enabled,
496 							hpo_dp_se_state.sdp_enabled,
497 							hpo_dp_se_state.compressed_format,
498 							hpo_dp_se_state.mapped_to_link_enc);
499 				}
500 			}
501 
502 			DTN_INFO("\n");
503 		}
504 
505 		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
506 		if (pool->hpo_dp_link_enc_count) {
507 			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
508 
509 			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
510 				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
511 				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
512 
513 				if (hpo_dp_link_enc->funcs->read_state) {
514 					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
515 					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
516 							hpo_dp_link_enc->inst,
517 							hpo_dp_le_state.link_enc_enabled,
518 							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
519 									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
520 									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
521 							hpo_dp_le_state.lane_count,
522 							hpo_dp_le_state.stream_src[0],
523 							hpo_dp_le_state.slot_count[0],
524 							hpo_dp_le_state.vc_rate_x[0],
525 							hpo_dp_le_state.vc_rate_y[0]);
526 					DTN_INFO("\n");
527 				}
528 			}
529 
530 			DTN_INFO("\n");
531 		}
532 	}
533 
534 	DTN_INFO_END();
535 }
536 
537 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
538 {
539 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
540 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
541 
542 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
543 		tg->funcs->clear_optc_underflow(tg);
544 		return true;
545 	}
546 
547 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
548 		hubp->funcs->hubp_clear_underflow(hubp);
549 		return true;
550 	}
551 	return false;
552 }
553 
554 void dcn10_enable_power_gating_plane(
555 	struct dce_hwseq *hws,
556 	bool enable)
557 {
	bool force_on = !enable; /* force_on == true disables power gating */
562 
563 	/* DCHUBP0/1/2/3 */
564 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
566 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
567 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
568 
569 	/* DPP0/1/2/3 */
570 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
572 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
573 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
574 }
575 
576 void dcn10_disable_vga(
577 	struct dce_hwseq *hws)
578 {
579 	unsigned int in_vga1_mode = 0;
580 	unsigned int in_vga2_mode = 0;
581 	unsigned int in_vga3_mode = 0;
582 	unsigned int in_vga4_mode = 0;
583 
584 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
585 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
586 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
587 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
588 
589 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
590 			in_vga3_mode == 0 && in_vga4_mode == 0)
591 		return;
592 
593 	REG_WRITE(D1VGA_CONTROL, 0);
594 	REG_WRITE(D2VGA_CONTROL, 0);
595 	REG_WRITE(D3VGA_CONTROL, 0);
596 	REG_WRITE(D4VGA_CONTROL, 0);
597 
598 	/* HW Engineer's Notes:
599 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
600 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
601 	 *
602 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
603 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
604 	 */
605 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
606 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
607 }
608 
609 /**
610  * dcn10_dpp_pg_control - DPP power gate control.
611  *
612  * @hws: dce_hwseq reference.
613  * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (power gating released), false to
 *            power gate it.
 *
 * Power on or power gate the specified DPP instance.
617  */
618 void dcn10_dpp_pg_control(
619 		struct dce_hwseq *hws,
620 		unsigned int dpp_inst,
621 		bool power_on)
622 {
623 	uint32_t power_gate = power_on ? 0 : 1;
624 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
625 
626 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
627 		return;
628 	if (REG(DOMAIN1_PG_CONFIG) == 0)
629 		return;
630 
631 	switch (dpp_inst) {
632 	case 0: /* DPP0 */
633 		REG_UPDATE(DOMAIN1_PG_CONFIG,
634 				DOMAIN1_POWER_GATE, power_gate);
635 
636 		REG_WAIT(DOMAIN1_PG_STATUS,
637 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
638 				1, 1000);
639 		break;
640 	case 1: /* DPP1 */
641 		REG_UPDATE(DOMAIN3_PG_CONFIG,
642 				DOMAIN3_POWER_GATE, power_gate);
643 
644 		REG_WAIT(DOMAIN3_PG_STATUS,
645 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
646 				1, 1000);
647 		break;
648 	case 2: /* DPP2 */
649 		REG_UPDATE(DOMAIN5_PG_CONFIG,
650 				DOMAIN5_POWER_GATE, power_gate);
651 
652 		REG_WAIT(DOMAIN5_PG_STATUS,
653 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
654 				1, 1000);
655 		break;
656 	case 3: /* DPP3 */
657 		REG_UPDATE(DOMAIN7_PG_CONFIG,
658 				DOMAIN7_POWER_GATE, power_gate);
659 
660 		REG_WAIT(DOMAIN7_PG_STATUS,
661 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
662 				1, 1000);
663 		break;
664 	default:
665 		BREAK_TO_DEBUGGER();
666 		break;
667 	}
668 }
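
/*
 * Illustrative mapping, derived from the code above rather than from extra HW
 * documentation: dcn10_dpp_pg_control(hws, 0, true) writes
 * DOMAIN1_POWER_GATE = 0 and waits for DOMAIN1_PGFSM_PWR_STATUS to report
 * PGFSM_POWER_ON (0); dcn10_dpp_pg_control(hws, 0, false) writes
 * DOMAIN1_POWER_GATE = 1 and waits for PGFSM_POWER_OFF (2). Callers such as
 * power_on_plane() below wrap these calls with DC_IP_REQUEST_CNTL /
 * IP_REQUEST_EN.
 */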
669 
670 /**
671  * dcn10_hubp_pg_control - HUBP power gate control.
672  *
673  * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (power gating released), false to
 *            power gate it.
 *
 * Power on or power gate the specified HUBP instance.
678  */
679 void dcn10_hubp_pg_control(
680 		struct dce_hwseq *hws,
681 		unsigned int hubp_inst,
682 		bool power_on)
683 {
684 	uint32_t power_gate = power_on ? 0 : 1;
685 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
686 
687 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
688 		return;
689 	if (REG(DOMAIN0_PG_CONFIG) == 0)
690 		return;
691 
692 	switch (hubp_inst) {
693 	case 0: /* DCHUBP0 */
694 		REG_UPDATE(DOMAIN0_PG_CONFIG,
695 				DOMAIN0_POWER_GATE, power_gate);
696 
697 		REG_WAIT(DOMAIN0_PG_STATUS,
698 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
699 				1, 1000);
700 		break;
701 	case 1: /* DCHUBP1 */
702 		REG_UPDATE(DOMAIN2_PG_CONFIG,
703 				DOMAIN2_POWER_GATE, power_gate);
704 
705 		REG_WAIT(DOMAIN2_PG_STATUS,
706 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
707 				1, 1000);
708 		break;
709 	case 2: /* DCHUBP2 */
710 		REG_UPDATE(DOMAIN4_PG_CONFIG,
711 				DOMAIN4_POWER_GATE, power_gate);
712 
713 		REG_WAIT(DOMAIN4_PG_STATUS,
714 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
715 				1, 1000);
716 		break;
717 	case 3: /* DCHUBP3 */
718 		REG_UPDATE(DOMAIN6_PG_CONFIG,
719 				DOMAIN6_POWER_GATE, power_gate);
720 
721 		REG_WAIT(DOMAIN6_PG_STATUS,
722 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
723 				1, 1000);
724 		break;
725 	default:
726 		BREAK_TO_DEBUGGER();
727 		break;
728 	}
729 }
730 
731 static void power_on_plane(
732 	struct dce_hwseq *hws,
733 	int plane_id)
734 {
735 	DC_LOGGER_INIT(hws->ctx->logger);
736 	if (REG(DC_IP_REQUEST_CNTL)) {
737 		REG_SET(DC_IP_REQUEST_CNTL, 0,
738 				IP_REQUEST_EN, 1);
739 
740 		if (hws->funcs.dpp_pg_control)
741 			hws->funcs.dpp_pg_control(hws, plane_id, true);
742 
743 		if (hws->funcs.hubp_pg_control)
744 			hws->funcs.hubp_pg_control(hws, plane_id, true);
745 
746 		REG_SET(DC_IP_REQUEST_CNTL, 0,
747 				IP_REQUEST_EN, 0);
748 		DC_LOG_DEBUG(
749 				"Un-gated front end for pipe %d\n", plane_id);
750 	}
751 }
752 
753 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
754 {
755 	struct dce_hwseq *hws = dc->hwseq;
756 	struct hubp *hubp = dc->res_pool->hubps[0];
757 
758 	if (!hws->wa_state.DEGVIDCN10_253_applied)
759 		return;
760 
761 	hubp->funcs->set_blank(hubp, true);
762 
763 	REG_SET(DC_IP_REQUEST_CNTL, 0,
764 			IP_REQUEST_EN, 1);
765 
766 	hws->funcs.hubp_pg_control(hws, 0, false);
767 	REG_SET(DC_IP_REQUEST_CNTL, 0,
768 			IP_REQUEST_EN, 0);
769 
770 	hws->wa_state.DEGVIDCN10_253_applied = false;
771 }
772 
773 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
774 {
775 	struct dce_hwseq *hws = dc->hwseq;
776 	struct hubp *hubp = dc->res_pool->hubps[0];
777 	int i;
778 
779 	if (dc->debug.disable_stutter)
780 		return;
781 
782 	if (!hws->wa.DEGVIDCN10_253)
783 		return;
784 
785 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
786 		if (!dc->res_pool->hubps[i]->power_gated)
787 			return;
788 	}
789 
	/* All pipes are power gated; apply the workaround to enable stutter. */
791 
792 	REG_SET(DC_IP_REQUEST_CNTL, 0,
793 			IP_REQUEST_EN, 1);
794 
795 	hws->funcs.hubp_pg_control(hws, 0, true);
796 	REG_SET(DC_IP_REQUEST_CNTL, 0,
797 			IP_REQUEST_EN, 0);
798 
799 	hubp->funcs->set_hubp_blank_en(hubp, false);
800 	hws->wa_state.DEGVIDCN10_253_applied = true;
801 }
802 
803 void dcn10_bios_golden_init(struct dc *dc)
804 {
805 	struct dce_hwseq *hws = dc->hwseq;
806 	struct dc_bios *bp = dc->ctx->dc_bios;
807 	int i;
	bool allow_self_refresh_force_enable = true;
809 
810 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
811 		return;
812 
813 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_refresh_force_enable =
815 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
816 
817 
	/* WA to let DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
	 * command table; if it was 0 before the command table ran and changed
	 * to 1 afterwards, it should be set back to 0.
	 */
824 
825 	/* initialize dcn global */
826 	bp->funcs->enable_disp_power_gating(bp,
827 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
828 
829 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
830 		/* initialize dcn per pipe */
831 		bp->funcs->enable_disp_power_gating(bp,
832 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
833 	}
834 
835 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_refresh_force_enable == false &&
837 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
838 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
839 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
840 
841 }
842 
843 static void false_optc_underflow_wa(
844 		struct dc *dc,
845 		const struct dc_stream_state *stream,
846 		struct timing_generator *tg)
847 {
848 	int i;
849 	bool underflow;
850 
851 	if (!dc->hwseq->wa.false_optc_underflow)
852 		return;
853 
854 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
855 
856 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
857 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
858 
859 		if (old_pipe_ctx->stream != stream)
860 			continue;
861 
862 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
863 	}
864 
865 	if (tg->funcs->set_blank_data_double_buffer)
866 		tg->funcs->set_blank_data_double_buffer(tg, true);
867 
868 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
869 		tg->funcs->clear_optc_underflow(tg);
870 }
871 
872 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
873 {
874 	struct pipe_ctx *other_pipe;
875 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
876 
877 	/* Always use the largest vready_offset of all connected pipes */
878 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
879 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
880 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
881 	}
882 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
883 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
884 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
885 	}
886 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
887 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
888 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
889 	}
890 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
891 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
892 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
893 	}
894 
895 	return vready_offset;
896 }
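
/*
 * Worked example (hypothetical pipe split): in a two-plane MPC combine where
 * the top pipe has pipe_dlg_param.vready_offset = 12 and its bottom_pipe has
 * vready_offset = 18, calling this on either pipe returns 18, so every pipe
 * in the group is programmed with the same, largest VREADY offset.
 */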
897 
898 enum dc_status dcn10_enable_stream_timing(
899 		struct pipe_ctx *pipe_ctx,
900 		struct dc_state *context,
901 		struct dc *dc)
902 {
903 	struct dc_stream_state *stream = pipe_ctx->stream;
904 	enum dc_color_space color_space;
905 	struct tg_color black_color = {0};
906 
	/* The caller's loop visits pipe0 (the parent pipe) first and sets up
	 * the back end for it. Child pipes share the back end with pipe0, so
	 * no programming is needed for them.
	 */
911 	if (pipe_ctx->top_pipe != NULL)
912 		return DC_OK;
913 
914 	/* TODO check if timing_changed, disable stream if timing changed */
915 
	/* The HW programming guide assumes the display was already disabled
	 * by the unplug sequence and the OTG is stopped.
	 */
919 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
920 
921 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
922 			pipe_ctx->clock_source,
923 			&pipe_ctx->stream_res.pix_clk_params,
924 			dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
925 			&pipe_ctx->pll_settings)) {
926 		BREAK_TO_DEBUGGER();
927 		return DC_ERROR_UNEXPECTED;
928 	}
929 
930 	if (dc_is_hdmi_tmds_signal(stream->signal)) {
931 		stream->link->phy_state.symclk_ref_cnts.otg = 1;
932 		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
933 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
934 		else
935 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
936 	}
937 
938 	pipe_ctx->stream_res.tg->funcs->program_timing(
939 			pipe_ctx->stream_res.tg,
940 			&stream->timing,
941 			calculate_vready_offset_for_group(pipe_ctx),
942 			pipe_ctx->pipe_dlg_param.vstartup_start,
943 			pipe_ctx->pipe_dlg_param.vupdate_offset,
944 			pipe_ctx->pipe_dlg_param.vupdate_width,
945 			pipe_ctx->stream->signal,
946 			true);
947 
948 #if 0 /* move to after enable_crtc */
949 	/* TODO: OPP FMT, ABM. etc. should be done here. */
950 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
951 
952 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
953 
954 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
955 				pipe_ctx->stream_res.opp,
956 				&stream->bit_depth_params,
957 				&stream->clamping);
958 #endif
959 	/* program otg blank color */
960 	color_space = stream->output_color_space;
961 	color_space_to_black_color(dc, color_space, &black_color);
962 
	/*
	 * The way 4:2:0 is packed, two channels carry the Y component and one
	 * channel alternates between Cb and Cr, so both channels need the
	 * pixel value for Y.
	 */
968 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
969 		black_color.color_r_cr = black_color.color_g_y;
970 
971 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
972 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
973 				pipe_ctx->stream_res.tg,
974 				&black_color);
975 
976 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
977 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
978 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
979 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
980 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
981 	}
982 
	/* VTG is within DCHUB command block. DCFCLK is always on */
984 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
985 		BREAK_TO_DEBUGGER();
986 		return DC_ERROR_UNEXPECTED;
987 	}
988 
989 	/* TODO program crtc source select for non-virtual signal*/
990 	/* TODO program FMT */
991 	/* TODO setup link_enc */
992 	/* TODO set stream attributes */
993 	/* TODO program audio */
994 	/* TODO enable stream if timing changed */
995 	/* TODO unblank stream if DP */
996 
997 	return DC_OK;
998 }
999 
1000 static void dcn10_reset_back_end_for_pipe(
1001 		struct dc *dc,
1002 		struct pipe_ctx *pipe_ctx,
1003 		struct dc_state *context)
1004 {
1005 	int i;
1006 	struct dc_link *link;
1007 	DC_LOGGER_INIT(dc->ctx->logger);
1008 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1009 		pipe_ctx->stream = NULL;
1010 		return;
1011 	}
1012 
1013 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1014 		link = pipe_ctx->stream->link;
		/* DPMS may have already disabled the stream, or the dpms_off
		 * status may be incorrect due to the fastboot feature. When the
		 * system resumes from S4 with only the second screen active,
		 * dpms_off would be true but VBIOS has lit up eDP, so check the
		 * link status too.
		 */
1021 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1022 			core_link_disable_stream(pipe_ctx);
1023 		else if (pipe_ctx->stream_res.audio)
1024 			dc->hwss.disable_audio_stream(pipe_ctx);
1025 
1026 		if (pipe_ctx->stream_res.audio) {
1027 			/*disable az_endpoint*/
1028 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1029 
1030 			/*free audio*/
1031 			if (dc->caps.dynamic_audio == true) {
				/* we have to dynamically arbitrate the audio endpoints */
				/* we free the resource, so is_audio_acquired needs to be reset */
1034 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1035 						pipe_ctx->stream_res.audio, false);
1036 				pipe_ctx->stream_res.audio = NULL;
1037 			}
1038 		}
1039 	}
1040 
	/* The caller's loop resets the parent pipe (pipe0) last. The back end
	 * is shared by all pipes and is only disabled when the parent pipe is
	 * disabled.
	 */
1045 	if (pipe_ctx->top_pipe == NULL) {
1046 
1047 		if (pipe_ctx->stream_res.abm)
1048 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
1049 
1050 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1051 
1052 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1053 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
1054 			pipe_ctx->stream_res.tg->funcs->set_drr(
1055 					pipe_ctx->stream_res.tg, NULL);
1056 		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
1057 	}
1058 
1059 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1060 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1061 			break;
1062 
1063 	if (i == dc->res_pool->pipe_count)
1064 		return;
1065 
1066 	pipe_ctx->stream = NULL;
1067 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1068 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
1069 }
1070 
1071 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1072 {
	struct hubp *hubp;
1074 	unsigned int i;
	bool need_recover = false;
1076 
1077 	if (!dc->debug.recovery_enabled)
1078 		return false;
1079 
1080 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1081 		struct pipe_ctx *pipe_ctx =
1082 			&dc->current_state->res_ctx.pipe_ctx[i];
1083 		if (pipe_ctx != NULL) {
1084 			hubp = pipe_ctx->plane_res.hubp;
1085 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1086 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1087 					/* one pipe underflow, we will reset all the pipes*/
1088 					need_recover = true;
1089 				}
1090 			}
1091 		}
1092 	}
1093 	if (!need_recover)
1094 		return false;
1095 	/*
1096 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1097 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1098 	DCHUBP_CNTL:HUBP_DISABLE=1
1099 	DCHUBP_CNTL:HUBP_DISABLE=0
1100 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1101 	DCSURF_PRIMARY_SURFACE_ADDRESS
1102 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1103 	*/
1104 
1105 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1106 		struct pipe_ctx *pipe_ctx =
1107 			&dc->current_state->res_ctx.pipe_ctx[i];
1108 		if (pipe_ctx != NULL) {
1109 			hubp = pipe_ctx->plane_res.hubp;
1110 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1111 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1112 				hubp->funcs->set_hubp_blank_en(hubp, true);
1113 		}
1114 	}
1115 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1116 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1117 
1118 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1119 		struct pipe_ctx *pipe_ctx =
1120 			&dc->current_state->res_ctx.pipe_ctx[i];
1121 		if (pipe_ctx != NULL) {
1122 			hubp = pipe_ctx->plane_res.hubp;
1123 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1124 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1125 				hubp->funcs->hubp_disable_control(hubp, true);
1126 		}
1127 	}
1128 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1129 		struct pipe_ctx *pipe_ctx =
1130 			&dc->current_state->res_ctx.pipe_ctx[i];
1131 		if (pipe_ctx != NULL) {
1132 			hubp = pipe_ctx->plane_res.hubp;
1133 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1134 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
1136 		}
1137 	}
1138 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1139 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1140 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1141 		struct pipe_ctx *pipe_ctx =
1142 			&dc->current_state->res_ctx.pipe_ctx[i];
1143 		if (pipe_ctx != NULL) {
1144 			hubp = pipe_ctx->plane_res.hubp;
1145 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1146 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
1148 		}
1149 	}
1150 	return true;
1151 
1152 }
1153 
1154 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1155 {
1156 	struct hubbub *hubbub = dc->res_pool->hubbub;
1157 	static bool should_log_hw_state; /* prevent hw state log by default */
1158 
1159 	if (!hubbub->funcs->verify_allow_pstate_change_high)
1160 		return;
1161 
1162 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1163 		int i = 0;
1164 
1165 		if (should_log_hw_state)
1166 			dcn10_log_hw_state(dc, NULL);
1167 
1168 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1169 		BREAK_TO_DEBUGGER();
1170 		if (dcn10_hw_wa_force_recovery(dc)) {
1171 			/*check again*/
1172 			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1173 				BREAK_TO_DEBUGGER();
1174 		}
1175 	}
1176 }
1177 
1178 /* trigger HW to start disconnect plane from stream on the next vsync */
1179 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1180 {
1181 	struct dce_hwseq *hws = dc->hwseq;
1182 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1183 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1184 	struct mpc *mpc = dc->res_pool->mpc;
1185 	struct mpc_tree *mpc_tree_params;
1186 	struct mpcc *mpcc_to_remove = NULL;
1187 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1188 
1189 	mpc_tree_params = &(opp->mpc_tree_params);
1190 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1191 
1192 	/*Already reset*/
1193 	if (mpcc_to_remove == NULL)
1194 		return;
1195 
1196 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1197 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1198 	// so don't wait for MPCC_IDLE in the programming sequence
1199 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1200 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1201 
1202 	dc->optimized_required = true;
1203 
1204 	if (hubp->funcs->hubp_disconnect)
1205 		hubp->funcs->hubp_disconnect(hubp);
1206 
1207 	if (dc->debug.sanity_checks)
1208 		hws->funcs.verify_allow_pstate_change_high(dc);
1209 }
1210 
1211 /**
1212  * dcn10_plane_atomic_power_down - Power down plane components.
1213  *
 * @dc: dc struct reference, used to grab the hwseq.
1215  * @dpp: dpp struct reference.
1216  * @hubp: hubp struct reference.
1217  *
 * Keep in mind that this operation requires a power gate configuration;
 * however, power gate switch requests are precisely controlled to avoid
 * problems. For this reason, the power gate request is usually disabled.
 * This function first enables the power gate request, then power gates the
 * DPP and HUBP, and finally disables the power gate request again.
1223  */
1224 void dcn10_plane_atomic_power_down(struct dc *dc,
1225 		struct dpp *dpp,
1226 		struct hubp *hubp)
1227 {
1228 	struct dce_hwseq *hws = dc->hwseq;
1229 	DC_LOGGER_INIT(dc->ctx->logger);
1230 
1231 	if (REG(DC_IP_REQUEST_CNTL)) {
1232 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1233 				IP_REQUEST_EN, 1);
1234 
1235 		if (hws->funcs.dpp_pg_control)
1236 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1237 
1238 		if (hws->funcs.hubp_pg_control)
1239 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1240 
1241 		dpp->funcs->dpp_reset(dpp);
1242 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1243 				IP_REQUEST_EN, 0);
1244 		DC_LOG_DEBUG(
1245 				"Power gated front end %d\n", hubp->inst);
1246 	}
1247 }
1248 
/* Disable the HW used by the plane.
 * Note: cannot disable until the disconnect is complete.
 */
1252 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1253 {
1254 	struct dce_hwseq *hws = dc->hwseq;
1255 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1256 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1257 	int opp_id = hubp->opp_id;
1258 
1259 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1260 
1261 	hubp->funcs->hubp_clk_cntl(hubp, false);
1262 
1263 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1264 
1265 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1266 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1267 				pipe_ctx->stream_res.opp,
1268 				false);
1269 
1270 	hubp->power_gated = true;
1271 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1272 
1273 	hws->funcs.plane_atomic_power_down(dc,
1274 			pipe_ctx->plane_res.dpp,
1275 			pipe_ctx->plane_res.hubp);
1276 
1277 	pipe_ctx->stream = NULL;
1278 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1279 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1280 	pipe_ctx->top_pipe = NULL;
1281 	pipe_ctx->bottom_pipe = NULL;
1282 	pipe_ctx->plane_state = NULL;
1283 }
1284 
1285 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1286 {
1287 	struct dce_hwseq *hws = dc->hwseq;
1288 	DC_LOGGER_INIT(dc->ctx->logger);
1289 
1290 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1291 		return;
1292 
1293 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1294 
1295 	apply_DEGVIDCN10_253_wa(dc);
1296 
1297 	DC_LOG_DC("Power down front end %d\n",
1298 					pipe_ctx->pipe_idx);
1299 }
1300 
1301 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1302 {
1303 	int i;
1304 	struct dce_hwseq *hws = dc->hwseq;
1305 	struct hubbub *hubbub = dc->res_pool->hubbub;
1306 	bool can_apply_seamless_boot = false;
1307 
1308 	for (i = 0; i < context->stream_count; i++) {
1309 		if (context->streams[i]->apply_seamless_boot_optimization) {
1310 			can_apply_seamless_boot = true;
1311 			break;
1312 		}
1313 	}
1314 
1315 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1316 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1317 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1318 
		/* The assumption is that pipe_ctx does not map irregularly to a
		 * non-preferred front end. If pipe_ctx->stream is not NULL, we
		 * will use the pipe, so don't disable it.
		 */
1323 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1324 			continue;
1325 
1326 		/* Blank controller using driver code instead of
1327 		 * command table.
1328 		 */
1329 		if (tg->funcs->is_tg_enabled(tg)) {
1330 			if (hws->funcs.init_blank != NULL) {
1331 				hws->funcs.init_blank(dc, tg);
1332 				tg->funcs->lock(tg);
1333 			} else {
1334 				tg->funcs->lock(tg);
1335 				tg->funcs->set_blank(tg, true);
1336 				hwss_wait_for_blank_complete(tg);
1337 			}
1338 		}
1339 	}
1340 
1341 	/* Reset det size */
1342 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1343 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1344 		struct hubp *hubp = dc->res_pool->hubps[i];
1345 
1346 		/* Do not need to reset for seamless boot */
1347 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1348 			continue;
1349 
1350 		if (hubbub && hubp) {
1351 			if (hubbub->funcs->program_det_size)
1352 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1353 		}
1354 	}
1355 
1356 	/* num_opp will be equal to number of mpcc */
1357 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1358 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1359 
1360 		/* Cannot reset the MPC mux if seamless boot */
1361 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1362 			continue;
1363 
1364 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1365 				dc->res_pool->mpc, i);
1366 	}
1367 
1368 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1369 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1370 		struct hubp *hubp = dc->res_pool->hubps[i];
1371 		struct dpp *dpp = dc->res_pool->dpps[i];
1372 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1373 
		/* The assumption is that pipe_ctx does not map irregularly to a
		 * non-preferred front end. If pipe_ctx->stream is not NULL, we
		 * will use the pipe, so don't disable it.
		 */
1378 		if (can_apply_seamless_boot &&
1379 			pipe_ctx->stream != NULL &&
1380 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1381 				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK, whether or not
			// seamless boot is enabled, to suppress global sync
			// signals when the OTG is blanked. This prevents the pipe
			// from requesting data while in PSR.
1386 			tg->funcs->tg_init(tg);
1387 			hubp->power_gated = true;
1388 			continue;
1389 		}
1390 
1391 		/* Disable on the current state so the new one isn't cleared. */
1392 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1393 
1394 		dpp->funcs->dpp_reset(dpp);
1395 
1396 		pipe_ctx->stream_res.tg = tg;
1397 		pipe_ctx->pipe_idx = i;
1398 
1399 		pipe_ctx->plane_res.hubp = hubp;
1400 		pipe_ctx->plane_res.dpp = dpp;
1401 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1402 		hubp->mpcc_id = dpp->inst;
1403 		hubp->opp_id = OPP_ID_INVALID;
1404 		hubp->power_gated = false;
1405 
1406 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1407 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1408 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1409 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1410 
1411 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1412 
1413 		if (tg->funcs->is_tg_enabled(tg))
1414 			tg->funcs->unlock(tg);
1415 
1416 		dc->hwss.disable_plane(dc, pipe_ctx);
1417 
1418 		pipe_ctx->stream_res.tg = NULL;
1419 		pipe_ctx->plane_res.hubp = NULL;
1420 
1421 		if (tg->funcs->is_tg_enabled(tg)) {
1422 			if (tg->funcs->init_odm)
1423 				tg->funcs->init_odm(tg);
1424 		}
1425 
1426 		tg->funcs->tg_init(tg);
1427 	}
1428 
1429 	/* Power gate DSCs */
1430 	if (hws->funcs.dsc_pg_control != NULL) {
1431 		uint32_t num_opps = 0;
1432 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1433 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1434 
		// Step 1: Find out which OPTC is running and has DSC enabled.
		// We can't use res_pool->res_cap->num_timing_generator to check,
		// because it records the default display pipe setting built into
		// the driver, not the display pipes of the current chip. Some
		// ASICs are fused to fewer display pipes than the default. The
		// driver obtains the real information in dcnxx_resource_construct().
1441 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1442 			uint32_t optc_dsc_state = 0;
1443 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1444 
1445 			if (tg->funcs->is_tg_enabled(tg)) {
1446 				if (tg->funcs->get_dsc_status)
1447 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC can be ON, so once we get a result we exit this loop.
				// A non-zero value means DSC is enabled.
1450 				if (optc_dsc_state != 0) {
1451 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1452 					break;
1453 				}
1454 			}
1455 		}
1456 
		// Step 2: Power down the DSCs, but skip the DSC of the running OPTC.
1458 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1459 			struct dcn_dsc_state s  = {0};
1460 
1461 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1462 
1463 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1464 				s.dsc_clock_en && s.dsc_fw_en)
1465 				continue;
1466 
1467 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1468 		}
1469 	}
1470 }
1471 
1472 void dcn10_init_hw(struct dc *dc)
1473 {
1474 	int i;
1475 	struct abm *abm = dc->res_pool->abm;
1476 	struct dmcu *dmcu = dc->res_pool->dmcu;
1477 	struct dce_hwseq *hws = dc->hwseq;
1478 	struct dc_bios *dcb = dc->ctx->dc_bios;
1479 	struct resource_pool *res_pool = dc->res_pool;
1480 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1481 	bool   is_optimized_init_done = false;
1482 
1483 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1484 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1485 
	/* Align the bw context with the hw config when the system resumes. */
1487 	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1488 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1489 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1490 	}
1491 
1492 	// Initialize the dccg
1493 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1494 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1495 
1496 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1497 
1498 		REG_WRITE(REFCLK_CNTL, 0);
1499 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1500 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1501 
1502 		if (!dc->debug.disable_clock_gate) {
1503 			/* enable all DCN clock gating */
1504 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1505 
1506 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1507 
1508 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1509 		}
1510 
1511 		//Enable ability to power gate / don't force power on permanently
1512 		if (hws->funcs.enable_power_gating_plane)
1513 			hws->funcs.enable_power_gating_plane(hws, true);
1514 
1515 		return;
1516 	}
1517 
1518 	if (!dcb->funcs->is_accelerated_mode(dcb))
1519 		hws->funcs.disable_vga(dc->hwseq);
1520 
1521 	hws->funcs.bios_golden_init(dc);
1522 
1523 	if (dc->ctx->dc_bios->fw_info_valid) {
1524 		res_pool->ref_clocks.xtalin_clock_inKhz =
1525 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1526 
1527 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1528 			if (res_pool->dccg && res_pool->hubbub) {
1529 
1530 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1531 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1532 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1533 
1534 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1535 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1536 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1537 			} else {
1538 				// Not all ASICs have DCCG sw component
1539 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1540 						res_pool->ref_clocks.xtalin_clock_inKhz;
1541 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1542 						res_pool->ref_clocks.xtalin_clock_inKhz;
1543 			}
1544 		}
1545 	} else
1546 		ASSERT_CRITICAL(false);
1547 
1548 	for (i = 0; i < dc->link_count; i++) {
1549 		/* Power up AND update implementation according to the
1550 		 * required signal (which may be different from the
1551 		 * default signal on connector).
1552 		 */
1553 		struct dc_link *link = dc->links[i];
1554 
1555 		if (!is_optimized_init_done)
1556 			link->link_enc->funcs->hw_init(link->link_enc);
1557 
1558 		/* Check for enabled DIG to identify enabled display */
1559 		if (link->link_enc->funcs->is_dig_enabled &&
1560 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1561 			link->link_status.link_active = true;
1562 			if (link->link_enc->funcs->fec_is_active &&
1563 					link->link_enc->funcs->fec_is_active(link->link_enc))
1564 				link->fec_state = dc_link_fec_enabled;
1565 		}
1566 	}
1567 
1568 	/* we want to turn off all dp displays before doing detection */
1569 	dc_link_blank_all_dp_displays(dc);
1570 
1571 	if (hws->funcs.enable_power_gating_plane)
1572 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1573 
1574 	/* If taking control over from VBIOS, we may want to optimize our first
1575 	 * mode set, so we need to skip powering down pipes until we know which
1576 	 * pipes we want to use.
1577 	 * Otherwise, if taking control is not possible, we need to power
1578 	 * everything down.
1579 	 */
1580 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1581 		if (!is_optimized_init_done) {
1582 			hws->funcs.init_pipes(dc, dc->current_state);
1583 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1584 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1585 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1586 		}
1587 	}
1588 
1589 	if (!is_optimized_init_done) {
1590 
1591 		for (i = 0; i < res_pool->audio_count; i++) {
1592 			struct audio *audio = res_pool->audios[i];
1593 
1594 			audio->funcs->hw_init(audio);
1595 		}
1596 
1597 		for (i = 0; i < dc->link_count; i++) {
1598 			struct dc_link *link = dc->links[i];
1599 
1600 			if (link->panel_cntl)
1601 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1602 		}
1603 
1604 		if (abm != NULL)
1605 			abm->funcs->abm_init(abm, backlight);
1606 
1607 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1608 			dmcu->funcs->dmcu_init(dmcu);
1609 	}
1610 
1611 	if (abm != NULL && dmcu != NULL)
1612 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1613 
	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
1615 	if (!is_optimized_init_done)
1616 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1617 
1618 	if (!dc->debug.disable_clock_gate) {
1619 		/* enable all DCN clock gating */
1620 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1621 
1622 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1623 
1624 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1625 	}
1626 
1627 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1628 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1629 }
1630 
/* In headless boot cases, DIG may be turned on,
 * which causes HW/SW discrepancies.
 * To avoid this, power down the hardware on boot
 * if DIG is turned on.
 */
1636 void dcn10_power_down_on_boot(struct dc *dc)
1637 {
1638 	struct dc_link *edp_links[MAX_NUM_EDP];
1639 	struct dc_link *edp_link = NULL;
1640 	int edp_num;
1641 	int i = 0;
1642 
1643 	get_edp_links(dc, edp_links, &edp_num);
1644 	if (edp_num)
1645 		edp_link = edp_links[0];
1646 
1647 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1648 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1649 			dc->hwseq->funcs.edp_backlight_control &&
1650 			dc->hwss.power_down &&
1651 			dc->hwss.edp_power_control) {
1652 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1653 		dc->hwss.power_down(dc);
1654 		dc->hwss.edp_power_control(edp_link, false);
1655 	} else {
1656 		for (i = 0; i < dc->link_count; i++) {
1657 			struct dc_link *link = dc->links[i];
1658 
1659 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1660 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1661 					dc->hwss.power_down) {
1662 				dc->hwss.power_down(dc);
1663 				break;
1664 			}
1665 
1666 		}
1667 	}
1668 
	/*
	 * Call update_clocks with an empty context
	 * to send DISPLAY_OFF.
	 * Otherwise DISPLAY_OFF may not be asserted.
	 */
1674 	if (dc->clk_mgr->funcs->set_low_power_state)
1675 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1676 }
1677 
1678 void dcn10_reset_hw_ctx_wrap(
1679 		struct dc *dc,
1680 		struct dc_state *context)
1681 {
1682 	int i;
1683 	struct dce_hwseq *hws = dc->hwseq;
1684 
	/* Reset Back End */
1686 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1687 		struct pipe_ctx *pipe_ctx_old =
1688 			&dc->current_state->res_ctx.pipe_ctx[i];
1689 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1690 
1691 		if (!pipe_ctx_old->stream)
1692 			continue;
1693 
1694 		if (pipe_ctx_old->top_pipe)
1695 			continue;
1696 
1697 		if (!pipe_ctx->stream ||
1698 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1699 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1700 
1701 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1702 			if (hws->funcs.enable_stream_gating)
1703 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1704 			if (old_clk)
1705 				old_clk->funcs->cs_power_down(old_clk);
1706 		}
1707 	}
1708 }
1709 
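/*
 * For side-by-side / top-and-bottom stereo on the secondary split pipe,
 * temporarily swap the right-eye address into the surface address (the
 * caller restores the saved left address once the flip is programmed).
 * Otherwise, for other 3D view formats without a stereo address type,
 * mirror the left address/meta address into the right.
 */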
1710 static bool patch_address_for_sbs_tb_stereo(
1711 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1712 {
1713 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1714 	bool sec_split = pipe_ctx->top_pipe &&
1715 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1716 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1717 		(pipe_ctx->stream->timing.timing_3d_format ==
1718 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1719 		 pipe_ctx->stream->timing.timing_3d_format ==
1720 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1721 		*addr = plane_state->address.grph_stereo.left_addr;
1722 		plane_state->address.grph_stereo.left_addr =
1723 		plane_state->address.grph_stereo.right_addr;
1724 		return true;
1725 	} else {
1726 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1727 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1728 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1729 			plane_state->address.grph_stereo.right_addr =
1730 			plane_state->address.grph_stereo.left_addr;
1731 			plane_state->address.grph_stereo.right_meta_addr =
1732 			plane_state->address.grph_stereo.left_meta_addr;
1733 		}
1734 	}
1735 	return false;
1736 }
1737 
1738 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1739 {
1740 	bool addr_patched = false;
1741 	PHYSICAL_ADDRESS_LOC addr;
1742 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1743 
1744 	if (plane_state == NULL)
1745 		return;
1746 
1747 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1748 
1749 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1750 			pipe_ctx->plane_res.hubp,
1751 			&plane_state->address,
1752 			plane_state->flip_immediate);
1753 
1754 	plane_state->status.requested_address = plane_state->address;
1755 
1756 	if (plane_state->flip_immediate)
1757 		plane_state->status.current_address = plane_state->address;
1758 
1759 	if (addr_patched)
1760 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1761 }
1762 
1763 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1764 			const struct dc_plane_state *plane_state)
1765 {
1766 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1767 	const struct dc_transfer_func *tf = NULL;
1768 	bool result = true;
1769 
1770 	if (dpp_base == NULL)
1771 		return false;
1772 
1773 	if (plane_state->in_transfer_func)
1774 		tf = plane_state->in_transfer_func;
1775 
1776 	if (plane_state->gamma_correction &&
1777 		!dpp_base->ctx->dc->debug.always_use_regamma
1778 		&& !plane_state->gamma_correction->is_identity
1779 			&& dce_use_lut(plane_state->format))
1780 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1781 
1782 	if (tf == NULL)
1783 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1784 	else if (tf->type == TF_TYPE_PREDEFINED) {
1785 		switch (tf->tf) {
1786 		case TRANSFER_FUNCTION_SRGB:
1787 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1788 			break;
1789 		case TRANSFER_FUNCTION_BT709:
1790 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1791 			break;
1792 		case TRANSFER_FUNCTION_LINEAR:
1793 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1794 			break;
1795 		case TRANSFER_FUNCTION_PQ:
1796 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1797 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1798 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1799 			result = true;
1800 			break;
1801 		default:
1802 			result = false;
1803 			break;
1804 		}
1805 	} else if (tf->type == TF_TYPE_BYPASS) {
1806 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1807 	} else {
1808 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1809 					&dpp_base->degamma_params);
1810 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1811 				&dpp_base->degamma_params);
1812 		result = true;
1813 	}
1814 
1815 	return result;
1816 }
1817 
1818 #define MAX_NUM_HW_POINTS 0x200
1819 
1820 static void log_tf(struct dc_context *ctx,
1821 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1822 {
1823 	// DC_LOG_GAMMA is default logging of all hw points
1824 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1825 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1826 	int i = 0;
1827 
1828 	DC_LOGGER_INIT(ctx->logger);
1829 	DC_LOG_GAMMA("Gamma Correction TF");
1830 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1831 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1832 
1833 	for (i = 0; i < hw_points_num; i++) {
1834 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1835 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1836 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1837 	}
1838 
1839 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1840 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1841 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1842 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1843 	}
1844 }
1845 
1846 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1847 				const struct dc_stream_state *stream)
1848 {
1849 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1850 
1851 	if (dpp == NULL)
1852 		return false;
1853 
1854 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1855 
1856 	if (stream->out_transfer_func &&
1857 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1858 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1859 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1860 
1861 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1862 	 * update.
1863 	 */
1864 	else if (cm_helper_translate_curve_to_hw_format(
1865 			stream->out_transfer_func,
1866 			&dpp->regamma_params, false)) {
1867 		dpp->funcs->dpp_program_regamma_pwl(
1868 				dpp,
1869 				&dpp->regamma_params, OPP_REGAMMA_USER);
1870 	} else
1871 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1872 
1873 	if (stream != NULL && stream->ctx != NULL &&
1874 			stream->out_transfer_func != NULL) {
1875 		log_tf(stream->ctx,
1876 				stream->out_transfer_func,
1877 				dpp->regamma_params.hw_points_num);
1878 	}
1879 
1880 	return true;
1881 }
1882 
1883 void dcn10_pipe_control_lock(
1884 	struct dc *dc,
1885 	struct pipe_ctx *pipe,
1886 	bool lock)
1887 {
1888 	struct dce_hwseq *hws = dc->hwseq;
1889 
	/* Use the TG master update lock to lock everything on the TG,
	 * therefore only the top pipe needs to lock.
	 */
1893 	if (!pipe || pipe->top_pipe)
1894 		return;
1895 
1896 	if (dc->debug.sanity_checks)
1897 		hws->funcs.verify_allow_pstate_change_high(dc);
1898 
1899 	if (lock)
1900 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1901 	else
1902 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1903 
1904 	if (dc->debug.sanity_checks)
1905 		hws->funcs.verify_allow_pstate_change_high(dc);
1906 }
1907 
1908 /**
1909  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1910  *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely, or old values from being retained in the
 * case where the viewport changes in the same frame as the cursor.
1914  *
1915  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1916  * too close to VUPDATE, then stall out until VUPDATE finishes.
1917  *
1918  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1919  *       to avoid the need for this workaround.
1920  */
1921 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1922 {
1923 	struct dc_stream_state *stream = pipe_ctx->stream;
1924 	struct crtc_position position;
1925 	uint32_t vupdate_start, vupdate_end;
1926 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1927 	unsigned int us_per_line, us_vupdate;
1928 
1929 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1930 		return;
1931 
1932 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1933 		return;
1934 
1935 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1936 				       &vupdate_end);
1937 
1938 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1939 	vpos = position.vertical_count;
1940 
1941 	/* Avoid wraparound calculation issues */
1942 	vupdate_start += stream->timing.v_total;
1943 	vupdate_end += stream->timing.v_total;
1944 	vpos += stream->timing.v_total;
1945 
1946 	if (vpos <= vupdate_start) {
1947 		/* VPOS is in VACTIVE or back porch. */
1948 		lines_to_vupdate = vupdate_start - vpos;
1949 	} else if (vpos > vupdate_end) {
1950 		/* VPOS is in the front porch. */
1951 		return;
1952 	} else {
1953 		/* VPOS is in VUPDATE. */
1954 		lines_to_vupdate = 0;
1955 	}
1956 
1957 	/* Calculate time until VUPDATE in microseconds. */
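	/* pix_clk_100hz is in units of 100 Hz, so:
	 * us_per_line = h_total / (pix_clk_100hz * 100) * 1000000
	 *             = h_total * 10000 / pix_clk_100hz
	 */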
1958 	us_per_line =
1959 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1960 	us_to_vupdate = lines_to_vupdate * us_per_line;
1961 
	/* 70 us is a conservative estimate of cursor update time */
1963 	if (us_to_vupdate > 70)
1964 		return;
1965 
1966 	/* Stall out until the cursor update completes. */
1967 	if (vupdate_end < vupdate_start)
1968 		vupdate_end += stream->timing.v_total;
1969 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1970 	udelay(us_to_vupdate + us_vupdate);
1971 }
1972 
1973 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1974 {
1975 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1976 	if (!pipe || pipe->top_pipe)
1977 		return;
1978 
1979 	/* Prevent cursor lock from stalling out cursor updates. */
1980 	if (lock)
1981 		delay_cursor_until_vupdate(dc, pipe);
1982 
1983 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1984 		union dmub_hw_lock_flags hw_locks = { 0 };
1985 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1986 
1987 		hw_locks.bits.lock_cursor = 1;
1988 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1989 
1990 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1991 					lock,
1992 					&hw_locks,
1993 					&inst_flags);
1994 	} else
1995 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1996 				pipe->stream_res.opp->inst, lock);
1997 }
1998 
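/*
 * Poll the OTG for the triggered reset, waiting at most
 * frames_to_wait_on_triggered_reset frames (one VACTIVE->VBLANK transition
 * per iteration) and bailing out early if the TG counter is not moving.
 */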
1999 static bool wait_for_reset_trigger_to_occur(
2000 	struct dc_context *dc_ctx,
2001 	struct timing_generator *tg)
2002 {
2003 	bool rc = false;
2004 
	/* To avoid an endless loop, we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2007 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2008 	int i;
2009 
2010 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2011 
2012 		if (!tg->funcs->is_counter_moving(tg)) {
2013 			DC_ERROR("TG counter is not moving!\n");
2014 			break;
2015 		}
2016 
2017 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2018 			rc = true;
2019 			/* usually occurs at i=1 */
2020 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2021 					i);
2022 			break;
2023 		}
2024 
2025 		/* Wait for one frame. */
2026 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2027 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2028 	}
2029 
	if (!rc)
2031 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2032 
2033 	return rc;
2034 }
2035 
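/*
 * Reduce a numerator/denominator pair by dividing out small prime factors
 * (up to 997). When checkUint32Boundary is true, stop as soon as both values
 * fit in 32 bits and return whether they do; when it is false, run the full
 * reduction and return true.
 */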
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Boundary)
{
	int i;
	bool ret = !checkUint32Boundary;
2042 	uint64_t max_int32 = 0xffffffff;
2043 	uint64_t num, denom;
2044 	static const uint16_t prime_numbers[] = {
2045 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2046 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2047 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2048 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2049 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2050 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2051 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2052 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2053 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2054 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2055 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2056 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2057 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2058 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2059 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2060 	int count = ARRAY_SIZE(prime_numbers);
2061 
2062 	num = *numerator;
2063 	denom = *denominator;
2064 	for (i = 0; i < count; i++) {
2065 		uint32_t num_remainder, denom_remainder;
2066 		uint64_t num_result, denom_result;
		if (checkUint32Boundary &&
2068 			num <= max_int32 && denom <= max_int32) {
2069 			ret = true;
2070 			break;
2071 		}
2072 		do {
2073 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2074 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2075 			if (num_remainder == 0 && denom_remainder == 0) {
2076 				num = num_result;
2077 				denom = denom_result;
2078 			}
2079 		} while (num_remainder == 0 && denom_remainder == 0);
2080 	}
2081 	*numerator = num;
2082 	*denominator = denom;
2083 	return ret;
2084 }
2085 
2086 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2087 {
2088 	uint32_t master_pipe_refresh_rate =
2089 		pipe->stream->timing.pix_clk_100hz * 100 /
2090 		pipe->stream->timing.h_total /
2091 		pipe->stream->timing.v_total;
2092 	return master_pipe_refresh_rate <= 30;
2093 }
2094 
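/*
 * DTO clock divider for a pipe: x2 for refresh rates at or below 30 Hz
 * (only when account_low_refresh_rate is set), x2 for YCbCr 4:2:0 output,
 * then multiplied by the number of ODM-combined pipes in the chain.
 */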
2095 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2096 				 bool account_low_refresh_rate)
2097 {
2098 	uint32_t clock_divider = 1;
2099 	uint32_t numpipes = 1;
2100 
2101 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2102 		clock_divider *= 2;
2103 
2104 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2105 		clock_divider *= 2;
2106 
2107 	while (pipe->next_odm_pipe) {
2108 		pipe = pipe->next_odm_pipe;
2109 		numpipes++;
2110 	}
2111 	clock_divider *= numpipes;
2112 
2113 	return clock_divider;
2114 }
2115 
2116 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2117 				    struct pipe_ctx *grouped_pipes[])
2118 {
2119 	struct dc_context *dc_ctx = dc->ctx;
2120 	int i, master = -1, embedded = -1;
2121 	struct dc_crtc_timing *hw_crtc_timing;
2122 	uint64_t phase[MAX_PIPES];
2123 	uint64_t modulo[MAX_PIPES];
2124 	unsigned int pclk;
2125 
2126 	uint32_t embedded_pix_clk_100hz;
2127 	uint16_t embedded_h_total;
2128 	uint16_t embedded_v_total;
2129 	uint32_t dp_ref_clk_100hz =
2130 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2131 
2132 	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2133 	if (!hw_crtc_timing)
2134 		return master;
2135 
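	/* vblank_alignment_dto_params packs the embedded panel timing:
	 * bits [31:0]  pixel clock in units of 100 Hz
	 * bits [46:32] horizontal total
	 * bits [62:48] vertical total
	 */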
2136 	if (dc->config.vblank_alignment_dto_params &&
2137 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2138 		embedded_h_total =
2139 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2140 		embedded_v_total =
2141 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2142 		embedded_pix_clk_100hz =
2143 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2144 
2145 		for (i = 0; i < group_size; i++) {
2146 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2147 					grouped_pipes[i]->stream_res.tg,
2148 					&hw_crtc_timing[i]);
2149 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2150 				dc->res_pool->dp_clock_source,
2151 				grouped_pipes[i]->stream_res.tg->inst,
2152 				&pclk);
2153 			hw_crtc_timing[i].pix_clk_100hz = pclk;
2154 			if (dc_is_embedded_signal(
2155 					grouped_pipes[i]->stream->signal)) {
2156 				embedded = i;
2157 				master = i;
2158 				phase[i] = embedded_pix_clk_100hz*100;
2159 				modulo[i] = dp_ref_clk_100hz*100;
2160 			} else {
2161 
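				/* Program a DP DTO so this pipe's pixel clock becomes
				 * dprefclk * phase / modulo, i.e.
				 * embedded_pclk * (pipe total pixels / divider) /
				 * (embedded panel total pixels), which matches this
				 * pipe's frame rate to the embedded panel's.
				 */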
2162 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
2163 					hw_crtc_timing[i].h_total*
2164 					hw_crtc_timing[i].v_total;
2165 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2166 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
2167 					embedded_h_total*
2168 					embedded_v_total;
2169 
2170 				if (reduceSizeAndFraction(&phase[i],
2171 						&modulo[i], true) == false) {
					/*
					 * This will stop this timing from being
					 * reported as synchronizable.
					 */
2176 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2177 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2178 				}
2179 			}
2180 		}
2181 
2182 		for (i = 0; i < group_size; i++) {
2183 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2184 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2185 					dc->res_pool->dp_clock_source,
2186 					grouped_pipes[i]->stream_res.tg->inst,
2187 					phase[i], modulo[i]);
2188 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2189 					dc->res_pool->dp_clock_source,
2190 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2191 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2192 					pclk*get_clock_divider(grouped_pipes[i], false);
2193 				if (master == -1)
2194 					master = i;
2195 			}
2196 		}
2197 
2198 	}
2199 
2200 	kfree(hw_crtc_timing);
2201 	return master;
2202 }
2203 
2204 void dcn10_enable_vblanks_synchronization(
2205 	struct dc *dc,
2206 	int group_index,
2207 	int group_size,
2208 	struct pipe_ctx *grouped_pipes[])
2209 {
2210 	struct dc_context *dc_ctx = dc->ctx;
2211 	struct output_pixel_processor *opp;
2212 	struct timing_generator *tg;
2213 	int i, width, height, master;
2214 
2215 	for (i = 1; i < group_size; i++) {
2216 		opp = grouped_pipes[i]->stream_res.opp;
2217 		tg = grouped_pipes[i]->stream_res.tg;
2218 		tg->funcs->get_otg_active_size(tg, &width, &height);
2219 
2220 		if (!tg->funcs->is_tg_enabled(tg)) {
2221 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2222 			return;
2223 		}
2224 
2225 		if (opp->funcs->opp_program_dpg_dimensions)
2226 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2227 	}
2228 
2229 	for (i = 0; i < group_size; i++) {
2230 		if (grouped_pipes[i]->stream == NULL)
2231 			continue;
2232 		grouped_pipes[i]->stream->vblank_synchronized = false;
2233 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2234 	}
2235 
2236 	DC_SYNC_INFO("Aligning DP DTOs\n");
2237 
2238 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2239 
2240 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2241 
2242 	if (master >= 0) {
2243 		for (i = 0; i < group_size; i++) {
2244 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2245 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2246 					grouped_pipes[master]->stream_res.tg,
2247 					grouped_pipes[i]->stream_res.tg,
2248 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2249 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2250 					get_clock_divider(grouped_pipes[master], false),
2251 					get_clock_divider(grouped_pipes[i], false));
2252 			grouped_pipes[i]->stream->vblank_synchronized = true;
2253 		}
2254 		grouped_pipes[master]->stream->vblank_synchronized = true;
2255 		DC_SYNC_INFO("Sync complete\n");
2256 	}
2257 
2258 	for (i = 1; i < group_size; i++) {
2259 		opp = grouped_pipes[i]->stream_res.opp;
2260 		tg = grouped_pipes[i]->stream_res.tg;
2261 		tg->funcs->get_otg_active_size(tg, &width, &height);
2262 		if (opp->funcs->opp_program_dpg_dimensions)
2263 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2264 	}
2265 }
2266 
2267 void dcn10_enable_timing_synchronization(
2268 	struct dc *dc,
2269 	int group_index,
2270 	int group_size,
2271 	struct pipe_ctx *grouped_pipes[])
2272 {
2273 	struct dc_context *dc_ctx = dc->ctx;
2274 	struct output_pixel_processor *opp;
2275 	struct timing_generator *tg;
2276 	int i, width, height;
2277 
2278 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2279 
2280 	for (i = 1; i < group_size; i++) {
2281 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2282 			continue;
2283 
2284 		opp = grouped_pipes[i]->stream_res.opp;
2285 		tg = grouped_pipes[i]->stream_res.tg;
2286 		tg->funcs->get_otg_active_size(tg, &width, &height);
2287 
2288 		if (!tg->funcs->is_tg_enabled(tg)) {
2289 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2290 			return;
2291 		}
2292 
2293 		if (opp->funcs->opp_program_dpg_dimensions)
2294 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2295 	}
2296 
2297 	for (i = 0; i < group_size; i++) {
2298 		if (grouped_pipes[i]->stream == NULL)
2299 			continue;
2300 
2301 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2302 			continue;
2303 
2304 		grouped_pipes[i]->stream->vblank_synchronized = false;
2305 	}
2306 
2307 	for (i = 1; i < group_size; i++) {
2308 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2309 			continue;
2310 
2311 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2312 				grouped_pipes[i]->stream_res.tg,
2313 				grouped_pipes[0]->stream_res.tg->inst);
2314 	}
2315 
2316 	DC_SYNC_INFO("Waiting for trigger\n");
2317 
	/* Only need to check one pipe for the reset, as all the others are
	 * synchronized. Look at the last pipe programmed to reset.
	 */
2321 
2322 	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2323 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2324 
2325 	for (i = 1; i < group_size; i++) {
2326 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2327 			continue;
2328 
2329 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2330 				grouped_pipes[i]->stream_res.tg);
2331 	}
2332 
2333 	for (i = 1; i < group_size; i++) {
2334 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2335 			continue;
2336 
2337 		opp = grouped_pipes[i]->stream_res.opp;
2338 		tg = grouped_pipes[i]->stream_res.tg;
2339 		tg->funcs->get_otg_active_size(tg, &width, &height);
2340 		if (opp->funcs->opp_program_dpg_dimensions)
2341 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2342 	}
2343 
2344 	DC_SYNC_INFO("Sync complete\n");
2345 }
2346 
2347 void dcn10_enable_per_frame_crtc_position_reset(
2348 	struct dc *dc,
2349 	int group_size,
2350 	struct pipe_ctx *grouped_pipes[])
2351 {
2352 	struct dc_context *dc_ctx = dc->ctx;
2353 	int i;
2354 
2355 	DC_SYNC_INFO("Setting up\n");
2356 	for (i = 0; i < group_size; i++)
2357 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2358 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2359 					grouped_pipes[i]->stream_res.tg,
2360 					0,
2361 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2362 
2363 	DC_SYNC_INFO("Waiting for trigger\n");
2364 
2365 	for (i = 0; i < group_size; i++)
2366 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2367 
2368 	DC_SYNC_INFO("Multi-display sync is complete\n");
2369 }
2370 
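/*
 * Read the VM system aperture from the MC registers: the default physical
 * page number is in 4 KB units (hence << 12) and the aperture low/high
 * logical addresses are register fields shifted left by 18 bits (256 KB
 * granularity) to get byte addresses.
 */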
2371 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2372 		struct vm_system_aperture_param *apt,
2373 		struct dce_hwseq *hws)
2374 {
2375 	PHYSICAL_ADDRESS_LOC physical_page_number;
2376 	uint32_t logical_addr_low;
2377 	uint32_t logical_addr_high;
2378 
2379 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2380 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2381 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2382 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2383 
2384 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2385 			LOGICAL_ADDR, &logical_addr_low);
2386 
2387 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2388 			LOGICAL_ADDR, &logical_addr_high);
2389 
2390 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2391 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2392 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2393 }
2394 
/* Temporary: read settings from registers; in the future the values will come from KMD directly */
2396 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2397 		struct vm_context0_param *vm0,
2398 		struct dce_hwseq *hws)
2399 {
2400 	PHYSICAL_ADDRESS_LOC fb_base;
2401 	PHYSICAL_ADDRESS_LOC fb_offset;
2402 	uint32_t fb_base_value;
2403 	uint32_t fb_offset_value;
2404 
2405 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2406 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2407 
2408 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2409 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2410 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2411 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2412 
2413 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2414 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2415 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2416 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2417 
2418 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2419 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2420 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2421 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2422 
2423 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2424 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2425 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2426 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2427 
2428 	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2430 	 * Therefore we need to do
2431 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2432 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2433 	 */
2434 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2435 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2436 	vm0->pte_base.quad_part += fb_base.quad_part;
2437 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2438 }
2439 
2440 
2441 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2442 {
2443 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2444 	struct vm_system_aperture_param apt = {0};
2445 	struct vm_context0_param vm0 = {0};
2446 
2447 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2448 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2449 
2450 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2451 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2452 }
2453 
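/*
 * Per-plane enable: undo the DEGVIDCN10_253 workaround, power up the plane's
 * power-gating domain, enable the HUBP DCFCLK and the OPP pipe clock, program
 * the PTE VM settings when GPU VM is supported, and re-arm the flip interrupt
 * for the top pipe when requested.
 */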
2454 static void dcn10_enable_plane(
2455 	struct dc *dc,
2456 	struct pipe_ctx *pipe_ctx,
2457 	struct dc_state *context)
2458 {
2459 	struct dce_hwseq *hws = dc->hwseq;
2460 
2461 	if (dc->debug.sanity_checks) {
2462 		hws->funcs.verify_allow_pstate_change_high(dc);
2463 	}
2464 
2465 	undo_DEGVIDCN10_253_wa(dc);
2466 
2467 	power_on_plane(dc->hwseq,
2468 		pipe_ctx->plane_res.hubp->inst);
2469 
2470 	/* enable DCFCLK current DCHUB */
2471 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2472 
2473 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2474 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2475 			pipe_ctx->stream_res.opp,
2476 			true);
2477 
2478 	if (dc->config.gpu_vm_support)
2479 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2480 
2481 	if (dc->debug.sanity_checks) {
2482 		hws->funcs.verify_allow_pstate_change_high(dc);
2483 	}
2484 
2485 	if (!pipe_ctx->top_pipe
2486 		&& pipe_ctx->plane_state
2487 		&& pipe_ctx->plane_state->flip_int_enabled
2488 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2489 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2490 
2491 }
2492 
2493 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2494 {
2495 	int i = 0;
2496 	struct dpp_grph_csc_adjustment adjust;
2497 	memset(&adjust, 0, sizeof(adjust));
2498 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2499 
2500 
2501 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2502 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2503 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2504 			adjust.temperature_matrix[i] =
2505 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2506 	} else if (pipe_ctx->plane_state &&
2507 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2508 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2509 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2510 			adjust.temperature_matrix[i] =
2511 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2512 	}
2513 
2514 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2515 }
2516 
2517 
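/*
 * Returns true when this pipe holds a rear MPO plane (layer_index > 0) in an
 * RGB colorspace while the front plane (layer 0) at the top of the pipe chain
 * is still present, i.e. the case the RGB bias fix below targets.
 */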
2518 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2519 {
2520 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2521 		if (pipe_ctx->top_pipe) {
2522 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2523 
2524 			while (top->top_pipe)
2525 				top = top->top_pipe; // Traverse to top pipe_ctx
2526 			if (top->plane_state && top->plane_state->layer_index == 0)
2527 				return true; // Front MPO plane not hidden
2528 		}
2529 	}
2530 	return false;
2531 }
2532 
2533 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2534 {
2535 	// Override rear plane RGB bias to fix MPO brightness
2536 	uint16_t rgb_bias = matrix[3];
2537 
2538 	matrix[3] = 0;
2539 	matrix[7] = 0;
2540 	matrix[11] = 0;
2541 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2542 	matrix[3] = rgb_bias;
2543 	matrix[7] = rgb_bias;
2544 	matrix[11] = rgb_bias;
2545 }
2546 
2547 void dcn10_program_output_csc(struct dc *dc,
2548 		struct pipe_ctx *pipe_ctx,
2549 		enum dc_color_space colorspace,
2550 		uint16_t *matrix,
2551 		int opp_id)
2552 {
2553 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2554 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2555 
			/* MPO is broken with RGB colorspaces on DCN1 when the
			 * OCSC matrix brightness offset is >= 0, because OCSC
			 * comes before the MPC: blending adds the offsets from
			 * both the front and rear planes to the rear plane.
			 *
			 * The fix is to set the RGB bias to 0 on the rear
			 * plane; the top plane's black value pixels then add
			 * the offset instead of rear + front.
			 */
2563 
2564 			int16_t rgb_bias = matrix[3];
2565 			// matrix[3/7/11] are all the same offset value
2566 
2567 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2568 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2569 			} else {
2570 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2571 			}
2572 		}
2573 	} else {
2574 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2575 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2576 	}
2577 }
2578 
2579 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2580 {
2581 	struct dc_bias_and_scale bns_params = {0};
2582 
2583 	// program the input csc
2584 	dpp->funcs->dpp_setup(dpp,
2585 			plane_state->format,
2586 			EXPANSION_MODE_ZERO,
2587 			plane_state->input_csc_color_matrix,
2588 			plane_state->color_space,
2589 			NULL);
2590 
2591 	//set scale and bias registers
2592 	build_prescale_params(&bns_params, plane_state);
2593 	if (dpp->funcs->dpp_program_bias_and_scale)
2594 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2595 }
2596 
2597 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2598 {
2599 	struct mpc *mpc = dc->res_pool->mpc;
2600 
2601 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2602 		get_hdr_visual_confirm_color(pipe_ctx, color);
2603 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2604 		get_surface_visual_confirm_color(pipe_ctx, color);
2605 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2606 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2607 	else
2608 		color_space_to_black_color(
2609 				dc, pipe_ctx->stream->output_color_space, color);
2610 
2611 	if (mpc->funcs->set_bg_color) {
2612 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2613 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2614 	}
2615 }
2616 
2617 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2618 {
2619 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2620 	struct mpcc_blnd_cfg blnd_cfg = {0};
2621 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2622 	int mpcc_id;
2623 	struct mpcc *new_mpcc;
2624 	struct mpc *mpc = dc->res_pool->mpc;
2625 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2626 
2627 	blnd_cfg.overlap_only = false;
2628 	blnd_cfg.global_gain = 0xff;
2629 
2630 	if (per_pixel_alpha) {
2631 		/* DCN1.0 has output CM before MPC which seems to screw with
2632 		 * pre-multiplied alpha.
2633 		 */
2634 		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2635 				pipe_ctx->stream->output_color_space)
2636 						&& pipe_ctx->plane_state->pre_multiplied_alpha);
2637 		if (pipe_ctx->plane_state->global_alpha) {
2638 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2639 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2640 		} else {
2641 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2642 		}
2643 	} else {
2644 		blnd_cfg.pre_multiplied_alpha = false;
2645 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2646 	}
2647 
2648 	if (pipe_ctx->plane_state->global_alpha)
2649 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2650 	else
2651 		blnd_cfg.global_alpha = 0xff;
2652 
2653 	/*
2654 	 * TODO: remove hack
2655 	 * Note: currently there is a bug in init_hw such that
2656 	 * on resume from hibernate, BIOS sets up MPCC0, and
2657 	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This causes us to pick mpcc1 here,
	 * which causes a pstate hang for a yet unknown reason.
2660 	 */
2661 	mpcc_id = hubp->inst;
2662 
2663 	/* If there is no full update, don't need to touch MPC tree*/
2664 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2665 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2666 		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2667 		return;
2668 	}
2669 
2670 	/* check if this MPCC is already being used */
2671 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2672 	/* remove MPCC if being used */
2673 	if (new_mpcc != NULL)
2674 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2675 	else
2676 		if (dc->debug.sanity_checks)
2677 			mpc->funcs->assert_mpcc_idle_before_connect(
2678 					dc->res_pool->mpc, mpcc_id);
2679 
2680 	/* Call MPC to insert new plane */
2681 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2682 			mpc_tree_params,
2683 			&blnd_cfg,
2684 			NULL,
2685 			NULL,
2686 			hubp->inst,
2687 			mpcc_id);
2688 	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2689 
2690 	ASSERT(new_mpcc != NULL);
2691 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2692 	hubp->mpcc_id = mpcc_id;
2693 }
2694 
2695 static void update_scaler(struct pipe_ctx *pipe_ctx)
2696 {
2697 	bool per_pixel_alpha =
2698 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2699 
2700 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2701 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2702 	/* scaler configuration */
2703 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2704 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2705 }
2706 
2707 static void dcn10_update_dchubp_dpp(
2708 	struct dc *dc,
2709 	struct pipe_ctx *pipe_ctx,
2710 	struct dc_state *context)
2711 {
2712 	struct dce_hwseq *hws = dc->hwseq;
2713 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2714 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2715 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2716 	struct plane_size size = plane_state->plane_size;
2717 	unsigned int compat_level = 0;
2718 	bool should_divided_by_2 = false;
2719 
	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
	/* If the requested max dpp clock is lower than the current dispclk,
	 * there is no need to divide by 2.
	 */
2724 	if (plane_state->update_flags.bits.full_update) {
2725 
		/* The newly calculated dispclk and dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The
		 * current dispclk and dppclk come from
		 * dc->clk_mgr->clks.dispclk_khz / dppclk_khz.
		 * dcn10_validate_bandwidth computes the new dispclk and dppclk.
		 * dispclk is put in use after optimize_bandwidth, when
		 * ramp_up_dispclk_with_dpp is called.
		 * There are two places where dppclk is put in use. One is the
		 * same location as dispclk. The other is within
		 * update_dchubp_dpp, which happens between prepare_bandwidth
		 * and optimize_bandwidth.
		 * A dppclk update within update_dchubp_dpp means the new
		 * dispclk and dppclk values are not in use at the same time.
		 * When clocks are decreased, this may leave dppclk lower than
		 * the previous configuration and get the pipe stuck.
		 * For example, with eDP + external DP, change the DP resolution
		 * from 1920x1080x144hz to 1280x960x60hz.
		 * Before the change: dispclk = 337889 dppclk = 337889
		 * After the mode change, dcn10_validate_bandwidth calculates
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp is executed before dispclk is updated, so
		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
		 * 168944. This causes a pipe pstate warning issue.
		 * Solution: between prepare_bandwidth and optimize_bandwidth,
		 * while dispclk is going to be decreased, keep dppclk = dispclk.
		 */
2751 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2752 				dc->clk_mgr->clks.dispclk_khz)
2753 			should_divided_by_2 = false;
2754 		else
2755 			should_divided_by_2 =
2756 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2757 					dc->clk_mgr->clks.dispclk_khz / 2;
2758 
2759 		dpp->funcs->dpp_dppclk_control(
2760 				dpp,
2761 				should_divided_by_2,
2762 				true);
2763 
2764 		if (dc->res_pool->dccg)
2765 			dc->res_pool->dccg->funcs->update_dpp_dto(
2766 					dc->res_pool->dccg,
2767 					dpp->inst,
2768 					pipe_ctx->plane_res.bw.dppclk_khz);
2769 		else
2770 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2771 						dc->clk_mgr->clks.dispclk_khz / 2 :
2772 							dc->clk_mgr->clks.dispclk_khz;
2773 	}
2774 
	/* TODO: Need an input parameter to tell which OTG the current DCHUB
	 * pipe ties to. The VTG is within DCHUBBUB, which is a common block
	 * shared by each pipe HUBP. VTG has a 1:1 mapping with OTG. Each pipe
	 * HUBP selects which VTG to use.
	 */
2779 	if (plane_state->update_flags.bits.full_update) {
2780 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2781 
2782 		hubp->funcs->hubp_setup(
2783 			hubp,
2784 			&pipe_ctx->dlg_regs,
2785 			&pipe_ctx->ttu_regs,
2786 			&pipe_ctx->rq_regs,
2787 			&pipe_ctx->pipe_dlg_param);
2788 		hubp->funcs->hubp_setup_interdependent(
2789 			hubp,
2790 			&pipe_ctx->dlg_regs,
2791 			&pipe_ctx->ttu_regs);
2792 	}
2793 
2794 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2795 
2796 	if (plane_state->update_flags.bits.full_update ||
2797 		plane_state->update_flags.bits.bpp_change)
2798 		dcn10_update_dpp(dpp, plane_state);
2799 
2800 	if (plane_state->update_flags.bits.full_update ||
2801 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2802 		plane_state->update_flags.bits.global_alpha_change)
2803 		hws->funcs.update_mpcc(dc, pipe_ctx);
2804 
2805 	if (plane_state->update_flags.bits.full_update ||
2806 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2807 		plane_state->update_flags.bits.global_alpha_change ||
2808 		plane_state->update_flags.bits.scaling_change ||
2809 		plane_state->update_flags.bits.position_change) {
2810 		update_scaler(pipe_ctx);
2811 	}
2812 
2813 	if (plane_state->update_flags.bits.full_update ||
2814 		plane_state->update_flags.bits.scaling_change ||
2815 		plane_state->update_flags.bits.position_change) {
2816 		hubp->funcs->mem_program_viewport(
2817 			hubp,
2818 			&pipe_ctx->plane_res.scl_data.viewport,
2819 			&pipe_ctx->plane_res.scl_data.viewport_c);
2820 	}
2821 
2822 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2823 		dc->hwss.set_cursor_position(pipe_ctx);
2824 		dc->hwss.set_cursor_attribute(pipe_ctx);
2825 
2826 		if (dc->hwss.set_cursor_sdr_white_level)
2827 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2828 	}
2829 
2830 	if (plane_state->update_flags.bits.full_update) {
2831 		/*gamut remap*/
2832 		dc->hwss.program_gamut_remap(pipe_ctx);
2833 
2834 		dc->hwss.program_output_csc(dc,
2835 				pipe_ctx,
2836 				pipe_ctx->stream->output_color_space,
2837 				pipe_ctx->stream->csc_color_matrix.matrix,
2838 				pipe_ctx->stream_res.opp->inst);
2839 	}
2840 
2841 	if (plane_state->update_flags.bits.full_update ||
2842 		plane_state->update_flags.bits.pixel_format_change ||
2843 		plane_state->update_flags.bits.horizontal_mirror_change ||
2844 		plane_state->update_flags.bits.rotation_change ||
2845 		plane_state->update_flags.bits.swizzle_change ||
2846 		plane_state->update_flags.bits.dcc_change ||
2847 		plane_state->update_flags.bits.bpp_change ||
2848 		plane_state->update_flags.bits.scaling_change ||
2849 		plane_state->update_flags.bits.plane_size_change) {
2850 		hubp->funcs->hubp_program_surface_config(
2851 			hubp,
2852 			plane_state->format,
2853 			&plane_state->tiling_info,
2854 			&size,
2855 			plane_state->rotation,
2856 			&plane_state->dcc,
2857 			plane_state->horizontal_mirror,
2858 			compat_level);
2859 	}
2860 
2861 	hubp->power_gated = false;
2862 
2863 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2864 
2865 	if (is_pipe_tree_visible(pipe_ctx))
2866 		hubp->funcs->set_blank(hubp, false);
2867 }
2868 
2869 void dcn10_blank_pixel_data(
2870 		struct dc *dc,
2871 		struct pipe_ctx *pipe_ctx,
2872 		bool blank)
2873 {
2874 	enum dc_color_space color_space;
2875 	struct tg_color black_color = {0};
2876 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2877 	struct dc_stream_state *stream = pipe_ctx->stream;
2878 
2879 	/* program otg blank color */
2880 	color_space = stream->output_color_space;
2881 	color_space_to_black_color(dc, color_space, &black_color);
2882 
	/*
	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
	 * alternates between Cb and Cr, so both channels need the pixel
	 * value for Y.
	 */
2888 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2889 		black_color.color_r_cr = black_color.color_g_y;
2890 
2891 
2892 	if (stream_res->tg->funcs->set_blank_color)
2893 		stream_res->tg->funcs->set_blank_color(
2894 				stream_res->tg,
2895 				&black_color);
2896 
2897 	if (!blank) {
2898 		if (stream_res->tg->funcs->set_blank)
2899 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2900 		if (stream_res->abm) {
2901 			dc->hwss.set_pipe(pipe_ctx);
2902 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2903 		}
2904 	} else if (blank) {
2905 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2906 		if (stream_res->tg->funcs->set_blank) {
2907 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2908 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2909 		}
2910 	}
2911 }
2912 
2913 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2914 {
2915 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2916 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2917 	struct custom_float_format fmt;
2918 
2919 	fmt.exponenta_bits = 6;
2920 	fmt.mantissa_bits = 12;
2921 	fmt.sign = true;
2922 
2923 
2924 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2925 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2926 
2927 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2928 			pipe_ctx->plane_res.dpp, hw_mult);
2929 }
2930 
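/*
 * Per-pipe programming: for the top pipe, set up global sync, VTG params,
 * the vupdate interrupt and pixel-data blanking; then enable the plane on a
 * full update, program DCHUBP/DPP, the HDR multiplier and, when flagged, the
 * input/output transfer functions.
 */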
2931 void dcn10_program_pipe(
2932 		struct dc *dc,
2933 		struct pipe_ctx *pipe_ctx,
2934 		struct dc_state *context)
2935 {
2936 	struct dce_hwseq *hws = dc->hwseq;
2937 
2938 	if (pipe_ctx->top_pipe == NULL) {
2939 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2940 
2941 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2942 				pipe_ctx->stream_res.tg,
2943 				calculate_vready_offset_for_group(pipe_ctx),
2944 				pipe_ctx->pipe_dlg_param.vstartup_start,
2945 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2946 				pipe_ctx->pipe_dlg_param.vupdate_width);
2947 
2948 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2949 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2950 
2951 		if (hws->funcs.setup_vupdate_interrupt)
2952 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2953 
2954 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2955 	}
2956 
2957 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2958 		dcn10_enable_plane(dc, pipe_ctx, context);
2959 
2960 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2961 
2962 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2963 
2964 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2965 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2966 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2967 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2968 
	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
	 * so only do gamma programming for a full update.
	 * TODO: This can be further optimized/cleaned up.
	 * Always call this for now since it does a memcmp inside before
	 * doing the heavy calculation and programming.
	 */
2975 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2976 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2977 }
2978 
2979 void dcn10_wait_for_pending_cleared(struct dc *dc,
2980 		struct dc_state *context)
2981 {
2982 		struct pipe_ctx *pipe_ctx;
2983 		struct timing_generator *tg;
2984 		int i;
2985 
2986 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2987 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2988 			tg = pipe_ctx->stream_res.tg;
2989 
2990 			/*
			 * Only wait for the top pipe's tg pending bit.
			 * Also skip if the pipe is disabled.
2993 			 */
2994 			if (pipe_ctx->top_pipe ||
2995 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2996 			    !tg->funcs->is_tg_enabled(tg))
2997 				continue;
2998 
2999 			/*
3000 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
3001 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3002 			 * seems to not trigger the update right away, and if we
			 * lock again before VUPDATE then we don't get a separate
			 * operation.
3005 			 */
3006 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3007 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3008 		}
3009 }
3010 
3011 void dcn10_post_unlock_program_front_end(
3012 		struct dc *dc,
3013 		struct dc_state *context)
3014 {
3015 	int i;
3016 
3017 	DC_LOGGER_INIT(dc->ctx->logger);
3018 
3019 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3020 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3021 
3022 		if (!pipe_ctx->top_pipe &&
3023 			!pipe_ctx->prev_odm_pipe &&
3024 			pipe_ctx->stream) {
3025 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3026 
3027 			if (context->stream_status[i].plane_count == 0)
3028 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3029 		}
3030 	}
3031 
3032 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3033 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3034 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3035 
3036 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3037 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3038 			dc->hwss.optimize_bandwidth(dc, context);
3039 			break;
3040 		}
3041 
3042 	if (dc->hwseq->wa.DEGVIDCN10_254)
3043 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3044 }
3045 
3046 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3047 {
3048 	uint8_t i;
3049 
3050 	for (i = 0; i < context->stream_count; i++) {
3051 		if (context->streams[i]->timing.timing_3d_format
3052 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3053 			/*
3054 			 * Disable stutter
3055 			 */
3056 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3057 			break;
3058 		}
3059 	}
3060 }
3061 
3062 void dcn10_prepare_bandwidth(
3063 		struct dc *dc,
3064 		struct dc_state *context)
3065 {
3066 	struct dce_hwseq *hws = dc->hwseq;
3067 	struct hubbub *hubbub = dc->res_pool->hubbub;
3068 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3069 
3070 	if (dc->debug.sanity_checks)
3071 		hws->funcs.verify_allow_pstate_change_high(dc);
3072 
3073 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3074 		if (context->stream_count == 0)
3075 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3076 
3077 		dc->clk_mgr->funcs->update_clocks(
3078 				dc->clk_mgr,
3079 				context,
3080 				false);
3081 	}
3082 
3083 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3084 			&context->bw_ctx.bw.dcn.watermarks,
3085 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3086 			true);
3087 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3088 
3089 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3090 		DC_FP_START();
3091 		dcn_get_soc_clks(
3092 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3093 		DC_FP_END();
3094 		dcn_bw_notify_pplib_of_wm_ranges(
3095 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3096 	}
3097 
3098 	if (dc->debug.sanity_checks)
3099 		hws->funcs.verify_allow_pstate_change_high(dc);
3100 }
3101 
3102 void dcn10_optimize_bandwidth(
3103 		struct dc *dc,
3104 		struct dc_state *context)
3105 {
3106 	struct dce_hwseq *hws = dc->hwseq;
3107 	struct hubbub *hubbub = dc->res_pool->hubbub;
3108 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3109 
3110 	if (dc->debug.sanity_checks)
3111 		hws->funcs.verify_allow_pstate_change_high(dc);
3112 
3113 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3114 		if (context->stream_count == 0)
3115 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3116 
3117 		dc->clk_mgr->funcs->update_clocks(
3118 				dc->clk_mgr,
3119 				context,
3120 				true);
3121 	}
3122 
3123 	hubbub->funcs->program_watermarks(hubbub,
3124 			&context->bw_ctx.bw.dcn.watermarks,
3125 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3126 			true);
3127 
3128 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3129 
3130 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3131 		DC_FP_START();
3132 		dcn_get_soc_clks(
3133 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3134 		DC_FP_END();
3135 		dcn_bw_notify_pplib_of_wm_ranges(
3136 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3137 	}
3138 
3139 	if (dc->debug.sanity_checks)
3140 		hws->funcs.verify_allow_pstate_change_high(dc);
3141 }
3142 
3143 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3144 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3145 {
3146 	int i = 0;
3147 	struct drr_params params = {0};
3148 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3149 	unsigned int event_triggers = 0x800;
	// Note: DRR trigger events are generated regardless of whether the number of frames has been met.
3151 	unsigned int num_frames = 2;
3152 
3153 	params.vertical_total_max = adjust.v_total_max;
3154 	params.vertical_total_min = adjust.v_total_min;
3155 	params.vertical_total_mid = adjust.v_total_mid;
3156 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, GSL programming is
	 * needed. Static screen triggers may need to be programmed differently
	 * as well.
	 */
3161 	for (i = 0; i < num_pipes; i++) {
3162 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3163 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3164 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3165 					pipe_ctx[i]->stream_res.tg, &params);
3166 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3167 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3168 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3169 						pipe_ctx[i]->stream_res.tg,
3170 						event_triggers, num_frames);
3171 		}
3172 	}
3173 }
3174 
3175 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3176 		int num_pipes,
3177 		struct crtc_position *position)
3178 {
3179 	int i = 0;
3180 
	/* TODO: handle pipes > 1 */
3183 	for (i = 0; i < num_pipes; i++)
3184 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3185 }
3186 
3187 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3188 		int num_pipes, const struct dc_static_screen_params *params)
3189 {
3190 	unsigned int i;
3191 	unsigned int triggers = 0;
3192 
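	/* map the requested static screen triggers onto the timing generator's event mask bits */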
3193 	if (params->triggers.surface_update)
3194 		triggers |= 0x80;
3195 	if (params->triggers.cursor_update)
3196 		triggers |= 0x2;
3197 	if (params->triggers.force_trigger)
3198 		triggers |= 0x1;
3199 
3200 	for (i = 0; i < num_pipes; i++)
3201 		pipe_ctx[i]->stream_res.tg->funcs->
3202 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3203 					triggers, params->num_frames);
3204 }
3205 
3206 static void dcn10_config_stereo_parameters(
3207 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3208 {
3209 	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =
			stream->timing.timing_3d_format;
3212 	bool non_stereo_timing = false;
3213 
3214 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3215 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3216 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3217 		non_stereo_timing = true;
3218 
3219 	if (non_stereo_timing == false &&
3220 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3221 
3222 		flags->PROGRAM_STEREO         = 1;
3223 		flags->PROGRAM_POLARITY       = 1;
3224 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3225 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3226 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3227 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
			enum display_dongle_type dongle =
					stream->link->ddc->dongle_type;
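			/*
			 * Converter dongles (DP to VGA/DVI/HDMI) presumably cannot
			 * forward the DP stereo sync, so disable it for them.
			 */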
3230 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3231 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3232 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3233 				flags->DISABLE_STEREO_DP_SYNC = 1;
3234 		}
		flags->RIGHT_EYE_POLARITY =
				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3237 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3238 			flags->FRAME_PACKED = 1;
3239 	}
3242 }
3243 
3244 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3245 {
3246 	struct crtc_stereo_flags flags = { 0 };
3247 	struct dc_stream_state *stream = pipe_ctx->stream;
3248 
3249 	dcn10_config_stereo_parameters(stream, &flags);
3250 
3251 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3252 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3253 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3254 	} else {
3255 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3256 	}
3257 
3258 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3259 		pipe_ctx->stream_res.opp,
3260 		flags.PROGRAM_STEREO == 1,
3261 		&stream->timing);
3262 
3263 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3264 		pipe_ctx->stream_res.tg,
3265 		&stream->timing,
3266 		&flags);
3269 }
3270 
3271 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3272 {
3273 	int i;
3274 
3275 	for (i = 0; i < res_pool->pipe_count; i++) {
3276 		if (res_pool->hubps[i]->inst == mpcc_inst)
3277 			return res_pool->hubps[i];
3278 	}
3279 	ASSERT(false);
3280 	return NULL;
3281 }
3282 
3283 void dcn10_wait_for_mpcc_disconnect(
3284 		struct dc *dc,
3285 		struct resource_pool *res_pool,
3286 		struct pipe_ctx *pipe_ctx)
3287 {
3288 	struct dce_hwseq *hws = dc->hwseq;
3289 	int mpcc_inst;
3290 
3291 	if (dc->debug.sanity_checks) {
3292 		hws->funcs.verify_allow_pstate_change_high(dc);
3293 	}
3294 
3295 	if (!pipe_ctx->stream_res.opp)
3296 		return;
3297 
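	/*
	 * For every MPCC flagged as pending disconnect on this OPP, wait for the
	 * MPC to go idle (only while the timing generator is running) and blank
	 * the HUBP that fed it.
	 */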
3298 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3299 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3300 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3301 
3302 			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3303 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3304 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3305 			hubp->funcs->set_blank(hubp, true);
3306 		}
3307 	}
3308 
3309 	if (dc->debug.sanity_checks) {
3310 		hws->funcs.verify_allow_pstate_change_high(dc);
3311 	}
}
3314 
3315 bool dcn10_dummy_display_power_gating(
3316 	struct dc *dc,
3317 	uint8_t controller_id,
3318 	struct dc_bios *dcb,
3319 	enum pipe_gating_control power_gating)
3320 {
3321 	return true;
3322 }
3323 
3324 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3325 {
3326 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3327 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3328 	bool flip_pending;
3329 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3330 
3331 	if (plane_state == NULL)
3332 		return;
3333 
3334 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3335 					pipe_ctx->plane_res.hubp);
3336 
3337 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3338 
3339 	if (!flip_pending)
3340 		plane_state->status.current_address = plane_state->status.requested_address;
3341 
3342 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3343 			tg->funcs->is_stereo_left_eye) {
3344 		plane_state->status.is_right_eye =
3345 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3346 	}
3347 
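	/*
	 * If the workaround that disallows self refresh during a multi-plane
	 * transition is still armed, re-enable self refresh once a new frame
	 * has started.
	 */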
3348 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3349 		struct dce_hwseq *hwseq = dc->hwseq;
3350 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3351 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3352 
3353 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3354 			struct hubbub *hubbub = dc->res_pool->hubbub;
3355 
3356 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3357 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3358 		}
3359 	}
3360 }
3361 
3362 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3363 {
3364 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3365 
3366 	/* In DCN, this programming sequence is owned by the hubbub */
3367 	hubbub->funcs->update_dchub(hubbub, dh_data);
3368 }
3369 
3370 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3371 {
3372 	struct pipe_ctx *test_pipe, *split_pipe;
3373 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3374 	struct rect r1 = scl_data->recout, r2, r2_half;
3375 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3376 	int cur_layer = pipe_ctx->plane_state->layer_index;
3377 
3378 	/**
3379 	 * Disable the cursor if there's another pipe above this with a
3380 	 * plane that contains this pipe's viewport to prevent double cursor
3381 	 * and incorrect scaling artifacts.
3382 	 */
3383 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3384 	     test_pipe = test_pipe->top_pipe) {
		// Skip pipes without a plane state, invisible layers and pipe-split planes on the same layer
		if (!test_pipe->plane_state || !test_pipe->plane_state->visible ||
				test_pipe->plane_state->layer_index == cur_layer)
3387 			continue;
3388 
3389 		r2 = test_pipe->plane_res.scl_data.recout;
3390 		r2_r = r2.x + r2.width;
3391 		r2_b = r2.y + r2.height;
3392 		split_pipe = test_pipe;
3393 
		/**
		 * If pipe split has placed the other half of this plane on the
		 * same layer, merge the two recouts (they share the same height).
		 */
3398 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3399 		     split_pipe = split_pipe->top_pipe)
3400 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3401 				r2_half = split_pipe->plane_res.scl_data.recout;
3402 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3403 				r2.width = r2.width + r2_half.width;
3404 				r2_r = r2.x + r2.width;
3405 				break;
3406 			}
3407 
3408 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3409 			return true;
3410 	}
3411 
3412 	return false;
3413 }
3414 
3415 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3416 {
3417 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3418 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3419 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3420 	struct dc_cursor_mi_param param = {
3421 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3422 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3423 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3424 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3425 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3426 		.rotation = pipe_ctx->plane_state->rotation,
3427 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3428 	};
3429 	bool pipe_split_on = false;
3430 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3431 		(pipe_ctx->prev_odm_pipe != NULL);
3432 
3433 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3434 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3435 	int x_pos = pos_cpy.x;
3436 	int y_pos = pos_cpy.y;
3437 
3438 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3439 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3440 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3441 			pipe_split_on = true;
3442 		}
3443 	}
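	/*
	 * Treat this as pipe split when the pipe has a top/bottom pipe and its
	 * viewport no longer covers the full source rect.
	 */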
3444 
3445 	/**
3446 	 * DC cursor is stream space, HW cursor is plane space and drawn
3447 	 * as part of the framebuffer.
3448 	 *
3449 	 * Cursor position can't be negative, but hotspot can be used to
3450 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3451 	 * than the cursor size.
3452 	 */
3453 
3454 	/**
3455 	 * Translate cursor from stream space to plane space.
3456 	 *
3457 	 * If the cursor is scaled then we need to scale the position
3458 	 * to be in the approximately correct place. We can't do anything
3459 	 * about the actual size being incorrect, that's a limitation of
3460 	 * the hardware.
3461 	 */
3462 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3463 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3464 				pipe_ctx->plane_state->dst_rect.width;
3465 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3466 				pipe_ctx->plane_state->dst_rect.height;
3467 	} else {
3468 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3469 				pipe_ctx->plane_state->dst_rect.width;
3470 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3471 				pipe_ctx->plane_state->dst_rect.height;
3472 	}
3473 
3474 	/**
3475 	 * If the cursor's source viewport is clipped then we need to
3476 	 * translate the cursor to appear in the correct position on
3477 	 * the screen.
3478 	 *
3479 	 * This translation isn't affected by scaling so it needs to be
3480 	 * done *after* we adjust the position for the scale factor.
3481 	 *
3482 	 * This is only done by opt-in for now since there are still
3483 	 * some usecases like tiled display that might enable the
3484 	 * cursor on both streams while expecting dc to clip it.
3485 	 */
3486 	if (pos_cpy.translate_by_source) {
3487 		x_pos += pipe_ctx->plane_state->src_rect.x;
3488 		y_pos += pipe_ctx->plane_state->src_rect.y;
3489 	}
3490 
3491 	/**
3492 	 * If the position is negative then we need to add to the hotspot
3493 	 * to shift the cursor outside the plane.
3494 	 */
3495 
3496 	if (x_pos < 0) {
3497 		pos_cpy.x_hotspot -= x_pos;
3498 		x_pos = 0;
3499 	}
3500 
3501 	if (y_pos < 0) {
3502 		pos_cpy.y_hotspot -= y_pos;
3503 		y_pos = 0;
3504 	}
3505 
3506 	pos_cpy.x = (uint32_t)x_pos;
3507 	pos_cpy.y = (uint32_t)y_pos;
3508 
3509 	if (pipe_ctx->plane_state->address.type
3510 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3511 		pos_cpy.enable = false;
3512 
3513 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3514 		pos_cpy.enable = false;
3515 
3516 
3517 	if (param.rotation == ROTATION_ANGLE_0) {
3518 		int viewport_width =
3519 			pipe_ctx->plane_res.scl_data.viewport.width;
3520 		int viewport_x =
3521 			pipe_ctx->plane_res.scl_data.viewport.x;
3522 
3523 		if (param.mirror) {
3524 			if (pipe_split_on || odm_combine_on) {
3525 				if (pos_cpy.x >= viewport_width + viewport_x) {
3526 					pos_cpy.x = 2 * viewport_width
3527 							- pos_cpy.x + 2 * viewport_x;
3528 				} else {
3529 					uint32_t temp_x = pos_cpy.x;
3530 
3531 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3532 					if (temp_x >= viewport_x +
3533 						(int)hubp->curs_attr.width || pos_cpy.x
3534 						<= (int)hubp->curs_attr.width +
3535 						pipe_ctx->plane_state->src_rect.x) {
3536 						pos_cpy.x = temp_x + viewport_width;
3537 					}
3538 				}
3539 			} else {
3540 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3541 			}
3542 		}
3543 	}
3544 	// Swap axis and mirror horizontally
3545 	else if (param.rotation == ROTATION_ANGLE_90) {
3546 		uint32_t temp_x = pos_cpy.x;
3547 
3548 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3549 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3550 		pos_cpy.y = temp_x;
3551 	}
3552 	// Swap axis and mirror vertically
3553 	else if (param.rotation == ROTATION_ANGLE_270) {
3554 		uint32_t temp_y = pos_cpy.y;
3555 		int viewport_height =
3556 			pipe_ctx->plane_res.scl_data.viewport.height;
3557 		int viewport_y =
3558 			pipe_ctx->plane_res.scl_data.viewport.y;
3559 
3560 		/**
		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height
3562 		 * For pipe split cases:
3563 		 * - apply offset of viewport.y to normalize pos_cpy.x
3564 		 * - calculate the pos_cpy.y as before
3565 		 * - shift pos_cpy.y back by same offset to get final value
3566 		 * - since we iterate through both pipes, use the lower
3567 		 *   viewport.y for offset
3568 		 * For non pipe split cases, use the same calculation for
3569 		 *  pos_cpy.y as the 180 degree rotation case below,
3570 		 *  but use pos_cpy.x as our input because we are rotating
3571 		 *  270 degrees
3572 		 */
3573 		if (pipe_split_on || odm_combine_on) {
3574 			int pos_cpy_x_offset;
3575 			int other_pipe_viewport_y;
3576 
3577 			if (pipe_split_on) {
3578 				if (pipe_ctx->bottom_pipe) {
3579 					other_pipe_viewport_y =
3580 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3581 				} else {
3582 					other_pipe_viewport_y =
3583 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3584 				}
3585 			} else {
3586 				if (pipe_ctx->next_odm_pipe) {
3587 					other_pipe_viewport_y =
3588 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3589 				} else {
3590 					other_pipe_viewport_y =
3591 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3592 				}
3593 			}
3594 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3595 				other_pipe_viewport_y : viewport_y;
3596 			pos_cpy.x -= pos_cpy_x_offset;
3597 			if (pos_cpy.x > viewport_height) {
3598 				pos_cpy.x = pos_cpy.x - viewport_height;
3599 				pos_cpy.y = viewport_height - pos_cpy.x;
3600 			} else {
3601 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3602 			}
3603 			pos_cpy.y += pos_cpy_x_offset;
3604 		} else {
3605 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3606 		}
3607 		pos_cpy.x = temp_y;
3608 	}
3609 	// Mirror horizontally and vertically
3610 	else if (param.rotation == ROTATION_ANGLE_180) {
3611 		int viewport_width =
3612 			pipe_ctx->plane_res.scl_data.viewport.width;
3613 		int viewport_x =
3614 			pipe_ctx->plane_res.scl_data.viewport.x;
3615 
3616 		if (!param.mirror) {
3617 			if (pipe_split_on || odm_combine_on) {
3618 				if (pos_cpy.x >= viewport_width + viewport_x) {
3619 					pos_cpy.x = 2 * viewport_width
3620 							- pos_cpy.x + 2 * viewport_x;
3621 				} else {
3622 					uint32_t temp_x = pos_cpy.x;
3623 
3624 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3625 					if (temp_x >= viewport_x +
3626 						(int)hubp->curs_attr.width || pos_cpy.x
3627 						<= (int)hubp->curs_attr.width +
3628 						pipe_ctx->plane_state->src_rect.x) {
3629 						pos_cpy.x = temp_x + viewport_width;
3630 					}
3631 				}
3632 			} else {
3633 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3634 			}
3635 		}
3636 
3637 		/**
		 * Display groups that are 1xnY have pos_cpy.y > viewport.height
3639 		 * Calculation:
3640 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3641 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3642 		 * Simplify it as:
3643 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3644 		 */
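		/* e.g. viewport.y = 0, viewport.height = 1080, pos_cpy.y = 100 gives 2 * 0 + 1080 - 100 = 980 */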
3645 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3646 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3647 	}
3648 
3649 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3650 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3651 }
3652 
3653 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3654 {
3655 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3656 
3657 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3658 			pipe_ctx->plane_res.hubp, attributes);
3659 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3660 		pipe_ctx->plane_res.dpp, attributes);
3661 }
3662 
3663 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3664 {
3665 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3666 	struct fixed31_32 multiplier;
3667 	struct dpp_cursor_attributes opt_attr = { 0 };
3668 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3669 	struct custom_float_format fmt;
3670 
3671 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3672 		return;
3673 
3674 	fmt.exponenta_bits = 5;
3675 	fmt.mantissa_bits = 10;
3676 	fmt.sign = true;
3677 
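	/*
	 * The cursor scale is sdr_white_level / 80 nits, encoded as a signed
	 * 5.10 custom float (FP16-like); 0x3c00 is the 1.0 default set above.
	 */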
3678 	if (sdr_white_level > 80) {
3679 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3680 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3681 	}
3682 
3683 	opt_attr.scale = hw_scale;
3684 	opt_attr.bias = 0;
3685 
3686 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3687 			pipe_ctx->plane_res.dpp, &opt_attr);
3688 }
3689 
/*
 * apply_front_porch_workaround  TODO: is this still needed for FPGA?
 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 for interlaced mode, or 1 for
 * progressive.
 */
3696 static void apply_front_porch_workaround(
3697 	struct dc_crtc_timing *timing)
3698 {
3699 	if (timing->flags.INTERLACE == 1) {
3700 		if (timing->v_front_porch < 2)
3701 			timing->v_front_porch = 2;
3702 	} else {
3703 		if (timing->v_front_porch < 1)
3704 			timing->v_front_porch = 1;
3705 	}
3706 }
3707 
3708 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3709 {
3710 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3711 	struct dc_crtc_timing patched_crtc_timing;
3712 	int vesa_sync_start;
3713 	int asic_blank_end;
3714 	int interlace_factor;
3715 
3716 	patched_crtc_timing = *dc_crtc_timing;
3717 	apply_front_porch_workaround(&patched_crtc_timing);
3718 
3719 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3720 
3721 	vesa_sync_start = patched_crtc_timing.v_addressable +
3722 			patched_crtc_timing.v_border_bottom +
3723 			patched_crtc_timing.v_front_porch;
3724 
3725 	asic_blank_end = (patched_crtc_timing.v_total -
3726 			vesa_sync_start -
3727 			patched_crtc_timing.v_border_top)
3728 			* interlace_factor;
3729 
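	/*
	 * VUPDATE leads the end of vertical blank by vstartup_start lines, so
	 * the returned offset from vsync can be negative.
	 */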
3730 	return asic_blank_end -
3731 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3732 }
3733 
3734 void dcn10_calc_vupdate_position(
3735 		struct dc *dc,
3736 		struct pipe_ctx *pipe_ctx,
3737 		uint32_t *start_line,
3738 		uint32_t *end_line)
3739 {
3740 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3741 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3742 
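	/*
	 * Wrap the (possibly negative) vupdate offset into the current frame;
	 * the interrupt window is two lines wide.
	 */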
3743 	if (vupdate_pos >= 0)
3744 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3745 	else
3746 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3747 	*end_line = (*start_line + 2) % timing->v_total;
3748 }
3749 
3750 static void dcn10_cal_vline_position(
3751 		struct dc *dc,
3752 		struct pipe_ctx *pipe_ctx,
3753 		uint32_t *start_line,
3754 		uint32_t *end_line)
3755 {
3756 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3757 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3758 
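	/*
	 * The requested line offset is interpreted relative to the chosen
	 * reference point: for START_V_UPDATE it is rebased onto the vupdate
	 * offset from vsync, for START_V_SYNC it is used directly (vsync is
	 * line 0).
	 */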
3759 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3760 		if (vline_pos > 0)
3761 			vline_pos--;
3762 		else if (vline_pos < 0)
3763 			vline_pos++;
3764 
3765 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3766 		if (vline_pos >= 0)
3767 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3768 		else
3769 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3770 		*end_line = (*start_line + 2) % timing->v_total;
3771 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3772 		// vsync is line 0 so start_line is just the requested line offset
3773 		*start_line = vline_pos;
3774 		*end_line = (*start_line + 2) % timing->v_total;
3775 	} else
3776 		ASSERT(0);
3777 }
3778 
3779 void dcn10_setup_periodic_interrupt(
3780 		struct dc *dc,
3781 		struct pipe_ctx *pipe_ctx)
3782 {
3783 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3784 	uint32_t start_line = 0;
3785 	uint32_t end_line = 0;
3786 
3787 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3788 
3789 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3790 }
3791 
3792 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3793 {
3794 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3795 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3796 
3797 	if (start_line < 0) {
3798 		ASSERT(0);
3799 		start_line = 0;
3800 	}
3801 
3802 	if (tg->funcs->setup_vertical_interrupt2)
3803 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3804 }
3805 
3806 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3807 		struct dc_link_settings *link_settings)
3808 {
3809 	struct encoder_unblank_param params = {0};
3810 	struct dc_stream_state *stream = pipe_ctx->stream;
3811 	struct dc_link *link = stream->link;
3812 	struct dce_hwseq *hws = link->dc->hwseq;
3813 
	/* only the parameters set below are used by unblank */
3815 	params.timing = pipe_ctx->stream->timing;
3816 
3817 	params.link_settings.link_rate = link_settings->link_rate;
3818 
3819 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
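		/* YCbCr 4:2:0 drives the stream encoder at half the pixel rate */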
3820 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3821 			params.timing.pix_clk_100hz /= 2;
3822 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3823 	}
3824 
3825 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3826 		hws->funcs.edp_backlight_control(link, true);
3827 	}
3828 }
3829 
3830 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3831 				const uint8_t *custom_sdp_message,
3832 				unsigned int sdp_message_size)
3833 {
3834 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3835 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3836 				pipe_ctx->stream_res.stream_enc,
3837 				custom_sdp_message,
3838 				sdp_message_size);
3839 	}
3840 }

enum dc_status dcn10_set_clock(struct dc *dc,
3842 			enum dc_clock_type clock_type,
3843 			uint32_t clk_khz,
3844 			uint32_t stepping)
3845 {
3846 	struct dc_state *context = dc->current_state;
3847 	struct dc_clock_config clock_cfg = {0};
3848 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3849 
3850 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3851 		return DC_FAIL_UNSUPPORTED_1;
3852 
3853 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3854 		context, clock_type, &clock_cfg);
3855 
3856 	if (clk_khz > clock_cfg.max_clock_khz)
3857 		return DC_FAIL_CLK_EXCEED_MAX;
3858 
3859 	if (clk_khz < clock_cfg.min_clock_khz)
3860 		return DC_FAIL_CLK_BELOW_MIN;
3861 
3862 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3863 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3864 
	/* update the internally requested clock so update_clocks() uses it */
3866 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3867 		current_clocks->dispclk_khz = clk_khz;
3868 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3869 		current_clocks->dppclk_khz = clk_khz;
3870 	else
3871 		return DC_ERROR_UNEXPECTED;
3872 
	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3879 
3880 void dcn10_get_clock(struct dc *dc,
3881 			enum dc_clock_type clock_type,
3882 			struct dc_clock_config *clock_cfg)
3883 {
3884 	struct dc_state *context = dc->current_state;
3885 
	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
3890 
3891 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3892 {
3893 	struct resource_pool *pool = dc->res_pool;
3894 	int i;
3895 
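	/*
	 * Read back the HUBP hardware state and report DCC enable for each pipe
	 * that is not blanked.
	 */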
3896 	for (i = 0; i < pool->pipe_count; i++) {
3897 		struct hubp *hubp = pool->hubps[i];
3898 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3899 
3900 		hubp->funcs->hubp_read_state(hubp);
3901 
3902 		if (!s->blank_en)
3903 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3904 	}
3905 }
3906