1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 
60 #define DC_LOGGER_INIT(logger)
61 
62 #define CTX \
63 	hws->ctx
64 #define REG(reg)\
65 	hws->regs->reg
66 
67 #undef FN
68 #define FN(reg_name, field_name) \
69 	hws->shifts->field_name, hws->masks->field_name
70 
/* print is 17 characters wide; the first two characters are spaces */
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 	print_microsec(dc_ctx, log_ctx, ref_cycle)
74 
75 #define GAMMA_HW_POINTS_NUM 256
76 
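/* PGFSM power-status values polled from the DOMAIN*_PG_STATUS registers below */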
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79 
80 static void print_microsec(struct dc_context *dc_ctx,
81 			   struct dc_log_buffer_ctx *log_ctx,
82 			   uint32_t ref_cycle)
83 {
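	/*
	 * ref_cycle counts DCHUB reference-clock cycles; dividing by the
	 * reference clock in MHz yields microseconds, printed with three
	 * fractional digits to fill the 17-character-wide DTN column.
	 */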
84 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 	static const unsigned int frac = 1000;
86 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87 
88 	DTN_INFO("  %11d.%03d",
89 			us_x10 / frac,
90 			us_x10 % frac);
91 }
92 
93 void dcn10_lock_all_pipes(struct dc *dc,
94 	struct dc_state *context,
95 	bool lock)
96 {
97 	struct pipe_ctx *pipe_ctx;
98 	struct pipe_ctx *old_pipe_ctx;
99 	struct timing_generator *tg;
100 	int i;
101 
102 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
103 		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
104 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 		tg = pipe_ctx->stream_res.tg;
106 
107 		/*
108 		 * Only lock the top pipe's tg to prevent redundant
109 		 * (un)locking. Also skip if pipe is disabled.
110 		 */
111 		if (pipe_ctx->top_pipe ||
112 		    !pipe_ctx->stream ||
113 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
114 		    !tg->funcs->is_tg_enabled(tg))
115 			continue;
116 
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
121 	}
122 }
123 
124 static void log_mpc_crc(struct dc *dc,
125 	struct dc_log_buffer_ctx *log_ctx)
126 {
127 	struct dc_context *dc_ctx = dc->ctx;
128 	struct dce_hwseq *hws = dc->hwseq;
129 
130 	if (REG(MPC_CRC_RESULT_GB))
131 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
132 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
133 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
134 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
135 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
136 }
137 
138 static void dcn10_log_hubbub_state(struct dc *dc,
139 				   struct dc_log_buffer_ctx *log_ctx)
140 {
141 	struct dc_context *dc_ctx = dc->ctx;
142 	struct dcn_hubbub_wm wm;
143 	int i;
144 
145 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
146 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
147 
148 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
149 			"         sr_enter          sr_exit  dram_clk_change\n");
150 
151 	for (i = 0; i < 4; i++) {
152 		struct dcn_hubbub_wm_set *s;
153 
154 		s = &wm.sets[i];
155 		DTN_INFO("WM_Set[%d]:", s->wm_set);
156 		DTN_INFO_MICRO_SEC(s->data_urgent);
157 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
158 		DTN_INFO_MICRO_SEC(s->sr_enter);
159 		DTN_INFO_MICRO_SEC(s->sr_exit);
160 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
161 		DTN_INFO("\n");
162 	}
163 
164 	DTN_INFO("\n");
165 }
166 
167 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
168 {
169 	struct dc_context *dc_ctx = dc->ctx;
170 	struct resource_pool *pool = dc->res_pool;
171 	int i;
172 
173 	DTN_INFO(
174 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
175 	for (i = 0; i < pool->pipe_count; i++) {
176 		struct hubp *hubp = pool->hubps[i];
177 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
178 
179 		hubp->funcs->hubp_read_state(hubp);
180 
181 		if (!s->blank_en) {
182 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
183 					hubp->inst,
184 					s->pixel_format,
185 					s->inuse_addr_hi,
186 					s->viewport_width,
187 					s->viewport_height,
188 					s->rotation_angle,
189 					s->h_mirror_en,
190 					s->sw_mode,
191 					s->dcc_en,
192 					s->blank_en,
193 					s->clock_en,
194 					s->ttu_disable,
195 					s->underflow_status);
196 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
197 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
198 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
199 			DTN_INFO("\n");
200 		}
201 	}
202 
203 	DTN_INFO("\n=========RQ========\n");
204 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
205 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
206 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
207 	for (i = 0; i < pool->pipe_count; i++) {
208 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
209 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
210 
211 		if (!s->blank_en)
212 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
213 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
214 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
215 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
216 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
217 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
218 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
219 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
220 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
221 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
222 	}
223 
224 	DTN_INFO("========DLG========\n");
225 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
226 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
227 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
228 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
229 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
230 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
231 			"  x_rp_dlay  x_rr_sfl\n");
232 	for (i = 0; i < pool->pipe_count; i++) {
233 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
234 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
235 
236 		if (!s->blank_en)
237 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
238 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
239 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
240 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
241 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
242 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
243 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
244 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
245 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
246 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
247 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
248 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
249 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
250 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
251 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
252 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
253 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
254 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
255 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
256 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
257 				dlg_regs->xfc_reg_remote_surface_flip_latency);
258 	}
259 
260 	DTN_INFO("========TTU========\n");
261 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
262 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
263 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
264 	for (i = 0; i < pool->pipe_count; i++) {
265 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
266 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
267 
268 		if (!s->blank_en)
269 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
270 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
271 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
272 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
273 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
274 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
275 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
276 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
277 	}
278 	DTN_INFO("\n");
279 }
280 
281 void dcn10_log_hw_state(struct dc *dc,
282 	struct dc_log_buffer_ctx *log_ctx)
283 {
284 	struct dc_context *dc_ctx = dc->ctx;
285 	struct resource_pool *pool = dc->res_pool;
286 	int i;
287 
288 	DTN_INFO_BEGIN();
289 
290 	dcn10_log_hubbub_state(dc, log_ctx);
291 
292 	dcn10_log_hubp_states(dc, log_ctx);
293 
294 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
295 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
296 			"C31 C32   C33 C34\n");
297 	for (i = 0; i < pool->pipe_count; i++) {
298 		struct dpp *dpp = pool->dpps[i];
299 		struct dcn_dpp_state s = {0};
300 
301 		dpp->funcs->dpp_read_state(dpp, &s);
302 
303 		if (!s.is_enabled)
304 			continue;
305 
306 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
307 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
308 				dpp->inst,
309 				s.igam_input_format,
310 				(s.igam_lut_mode == 0) ? "BypassFixed" :
311 					((s.igam_lut_mode == 1) ? "BypassFloat" :
312 					((s.igam_lut_mode == 2) ? "RAM" :
313 					((s.igam_lut_mode == 3) ? "RAM" :
314 								 "Unknown"))),
315 				(s.dgam_lut_mode == 0) ? "Bypass" :
316 					((s.dgam_lut_mode == 1) ? "sRGB" :
317 					((s.dgam_lut_mode == 2) ? "Ycc" :
318 					((s.dgam_lut_mode == 3) ? "RAM" :
319 					((s.dgam_lut_mode == 4) ? "RAM" :
320 								 "Unknown")))),
321 				(s.rgam_lut_mode == 0) ? "Bypass" :
322 					((s.rgam_lut_mode == 1) ? "sRGB" :
323 					((s.rgam_lut_mode == 2) ? "Ycc" :
324 					((s.rgam_lut_mode == 3) ? "RAM" :
325 					((s.rgam_lut_mode == 4) ? "RAM" :
326 								 "Unknown")))),
327 				s.gamut_remap_mode,
328 				s.gamut_remap_c11_c12,
329 				s.gamut_remap_c13_c14,
330 				s.gamut_remap_c21_c22,
331 				s.gamut_remap_c23_c24,
332 				s.gamut_remap_c31_c32,
333 				s.gamut_remap_c33_c34);
334 		DTN_INFO("\n");
335 	}
336 	DTN_INFO("\n");
337 
338 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
339 	for (i = 0; i < pool->pipe_count; i++) {
340 		struct mpcc_state s = {0};
341 
342 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
343 		if (s.opp_id != 0xf)
344 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
345 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
346 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
347 				s.idle);
348 	}
349 	DTN_INFO("\n");
350 
351 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
352 
353 	for (i = 0; i < pool->timing_generator_count; i++) {
354 		struct timing_generator *tg = pool->timing_generators[i];
355 		struct dcn_otg_state s = {0};
356 		/* Read shared OTG state registers for all DCNx */
357 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
358 
359 		/*
360 		 * For DCN2 and greater, a register on the OPP is used to
361 		 * determine if the CRTC is blanked instead of the OTG. So use
362 		 * dpg_is_blanked() if exists, otherwise fallback on otg.
363 		 *
364 		 * TODO: Implement DCN-specific read_otg_state hooks.
365 		 */
366 		if (pool->opps[i]->funcs->dpg_is_blanked)
367 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
368 		else
369 			s.blank_enabled = tg->funcs->is_blanked(tg);
370 
371 		//only print if OTG master is enabled
372 		if ((s.otg_enabled & 1) == 0)
373 			continue;
374 
375 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
376 				tg->inst,
377 				s.v_blank_start,
378 				s.v_blank_end,
379 				s.v_sync_a_start,
380 				s.v_sync_a_end,
381 				s.v_sync_a_pol,
382 				s.v_total_max,
383 				s.v_total_min,
384 				s.v_total_max_sel,
385 				s.v_total_min_sel,
386 				s.h_blank_start,
387 				s.h_blank_end,
388 				s.h_sync_a_start,
389 				s.h_sync_a_end,
390 				s.h_sync_a_pol,
391 				s.h_total,
392 				s.v_total,
393 				s.underflow_occurred_status,
394 				s.blank_enabled);
395 
396 		// Clear underflow for debug purposes
		// We want to keep the underflow sticky bit on for longevity tests outside of the test environment.
		// This function is called only from the Windows or Diags test environment, so it is safe to clear
		// it here without affecting the original intent.
400 		tg->funcs->clear_optc_underflow(tg);
401 	}
402 	DTN_INFO("\n");
403 
404 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
405 	// TODO: Update golden log header to reflect this name change
406 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
407 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
408 		struct display_stream_compressor *dsc = pool->dscs[i];
409 		struct dcn_dsc_state s = {0};
410 
411 		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			dsc->inst,
414 			s.dsc_clock_en,
415 			s.dsc_slice_width,
416 			s.dsc_bits_per_pixel);
417 		DTN_INFO("\n");
418 	}
419 	DTN_INFO("\n");
420 
421 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
422 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
423 	for (i = 0; i < pool->stream_enc_count; i++) {
424 		struct stream_encoder *enc = pool->stream_enc[i];
425 		struct enc_state s = {0};
426 
427 		if (enc->funcs->enc_read_state) {
428 			enc->funcs->enc_read_state(enc, &s);
429 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
430 				enc->id,
431 				s.dsc_mode,
432 				s.sec_gsp_pps_line_num,
433 				s.vbid6_line_reference,
434 				s.vbid6_line_num,
435 				s.sec_gsp_pps_enable,
436 				s.sec_stream_enable);
437 			DTN_INFO("\n");
438 		}
439 	}
440 	DTN_INFO("\n");
441 
442 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
443 	for (i = 0; i < dc->link_count; i++) {
444 		struct link_encoder *lenc = dc->links[i]->link_enc;
445 
446 		struct link_enc_state s = {0};
447 
448 		if (lenc && lenc->funcs->read_state) {
449 			lenc->funcs->read_state(lenc, &s);
450 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
451 				i,
452 				s.dphy_fec_en,
453 				s.dphy_fec_ready_shadow,
454 				s.dphy_fec_active_status,
455 				s.dp_link_training_complete);
456 			DTN_INFO("\n");
457 		}
458 	}
459 	DTN_INFO("\n");
460 
461 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
462 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
463 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
464 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
465 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
466 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
467 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
468 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
469 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
470 
471 	log_mpc_crc(dc, log_ctx);
472 
473 	{
474 		if (pool->hpo_dp_stream_enc_count > 0) {
475 			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
476 			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
477 				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
478 				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
479 
480 				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
481 					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
482 
483 					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
484 							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
485 							hpo_dp_se_state.stream_enc_enabled,
486 							hpo_dp_se_state.otg_inst,
487 							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
488 									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
489 									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
490 							(hpo_dp_se_state.component_depth == 0) ? 6 :
491 									((hpo_dp_se_state.component_depth == 1) ? 8 :
492 									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
493 							hpo_dp_se_state.vid_stream_enabled,
494 							hpo_dp_se_state.sdp_enabled,
495 							hpo_dp_se_state.compressed_format,
496 							hpo_dp_se_state.mapped_to_link_enc);
497 				}
498 			}
499 
500 			DTN_INFO("\n");
501 		}
502 
503 		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
504 		if (pool->hpo_dp_link_enc_count) {
505 			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
506 
507 			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
508 				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
509 				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
510 
511 				if (hpo_dp_link_enc->funcs->read_state) {
512 					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
513 					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
514 							hpo_dp_link_enc->inst,
515 							hpo_dp_le_state.link_enc_enabled,
516 							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
517 									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
518 									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
519 							hpo_dp_le_state.lane_count,
520 							hpo_dp_le_state.stream_src[0],
521 							hpo_dp_le_state.slot_count[0],
522 							hpo_dp_le_state.vc_rate_x[0],
523 							hpo_dp_le_state.vc_rate_y[0]);
524 					DTN_INFO("\n");
525 				}
526 			}
527 
528 			DTN_INFO("\n");
529 		}
530 	}
531 
532 	DTN_INFO_END();
533 }
534 
535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 {
537 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
539 
540 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 		tg->funcs->clear_optc_underflow(tg);
542 		return true;
543 	}
544 
545 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 		hubp->funcs->hubp_clear_underflow(hubp);
547 		return true;
548 	}
549 	return false;
550 }
551 
552 void dcn10_enable_power_gating_plane(
553 	struct dce_hwseq *hws,
554 	bool enable)
555 {
556 	bool force_on = true; /* disable power gating */
557 
558 	if (enable)
559 		force_on = false;
560 
561 	/* DCHUBP0/1/2/3 */
562 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
563 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
564 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
566 
567 	/* DPP0/1/2/3 */
568 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
569 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
570 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
572 }
573 
574 void dcn10_disable_vga(
575 	struct dce_hwseq *hws)
576 {
577 	unsigned int in_vga1_mode = 0;
578 	unsigned int in_vga2_mode = 0;
579 	unsigned int in_vga3_mode = 0;
580 	unsigned int in_vga4_mode = 0;
581 
582 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
583 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
584 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
585 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
586 
587 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
588 			in_vga3_mode == 0 && in_vga4_mode == 0)
589 		return;
590 
591 	REG_WRITE(D1VGA_CONTROL, 0);
592 	REG_WRITE(D2VGA_CONTROL, 0);
593 	REG_WRITE(D3VGA_CONTROL, 0);
594 	REG_WRITE(D4VGA_CONTROL, 0);
595 
596 	/* HW Engineer's Notes:
597 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
598 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
599 	 *
600 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
601 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
602 	 */
603 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
604 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
605 }
606 
607 /**
608  * dcn10_dpp_pg_control - DPP power gate control.
609  *
610  * @hws: dce_hwseq reference.
611  * @dpp_inst: DPP instance reference.
 * @power_on: true to power on (un-gate) the DPP, false to power gate it.
613  *
614  * Enable or disable power gate in the specific DPP instance.
615  */
616 void dcn10_dpp_pg_control(
617 		struct dce_hwseq *hws,
618 		unsigned int dpp_inst,
619 		bool power_on)
620 {
621 	uint32_t power_gate = power_on ? 0 : 1;
622 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
623 
624 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
625 		return;
626 	if (REG(DOMAIN1_PG_CONFIG) == 0)
627 		return;
628 
629 	switch (dpp_inst) {
630 	case 0: /* DPP0 */
631 		REG_UPDATE(DOMAIN1_PG_CONFIG,
632 				DOMAIN1_POWER_GATE, power_gate);
633 
634 		REG_WAIT(DOMAIN1_PG_STATUS,
635 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
636 				1, 1000);
637 		break;
638 	case 1: /* DPP1 */
639 		REG_UPDATE(DOMAIN3_PG_CONFIG,
640 				DOMAIN3_POWER_GATE, power_gate);
641 
642 		REG_WAIT(DOMAIN3_PG_STATUS,
643 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
644 				1, 1000);
645 		break;
646 	case 2: /* DPP2 */
647 		REG_UPDATE(DOMAIN5_PG_CONFIG,
648 				DOMAIN5_POWER_GATE, power_gate);
649 
650 		REG_WAIT(DOMAIN5_PG_STATUS,
651 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
652 				1, 1000);
653 		break;
654 	case 3: /* DPP3 */
655 		REG_UPDATE(DOMAIN7_PG_CONFIG,
656 				DOMAIN7_POWER_GATE, power_gate);
657 
658 		REG_WAIT(DOMAIN7_PG_STATUS,
659 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
660 				1, 1000);
661 		break;
662 	default:
663 		BREAK_TO_DEBUGGER();
664 		break;
665 	}
666 }
667 
668 /**
669  * dcn10_hubp_pg_control - HUBP power gate control.
670  *
671  * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power on (un-gate) the HUBP, false to power gate it.
674  *
675  * Enable or disable power gate in the specific HUBP instance.
676  */
677 void dcn10_hubp_pg_control(
678 		struct dce_hwseq *hws,
679 		unsigned int hubp_inst,
680 		bool power_on)
681 {
682 	uint32_t power_gate = power_on ? 0 : 1;
683 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684 
685 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
686 		return;
687 	if (REG(DOMAIN0_PG_CONFIG) == 0)
688 		return;
689 
690 	switch (hubp_inst) {
691 	case 0: /* DCHUBP0 */
692 		REG_UPDATE(DOMAIN0_PG_CONFIG,
693 				DOMAIN0_POWER_GATE, power_gate);
694 
695 		REG_WAIT(DOMAIN0_PG_STATUS,
696 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
697 				1, 1000);
698 		break;
699 	case 1: /* DCHUBP1 */
700 		REG_UPDATE(DOMAIN2_PG_CONFIG,
701 				DOMAIN2_POWER_GATE, power_gate);
702 
703 		REG_WAIT(DOMAIN2_PG_STATUS,
704 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
705 				1, 1000);
706 		break;
707 	case 2: /* DCHUBP2 */
708 		REG_UPDATE(DOMAIN4_PG_CONFIG,
709 				DOMAIN4_POWER_GATE, power_gate);
710 
711 		REG_WAIT(DOMAIN4_PG_STATUS,
712 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
713 				1, 1000);
714 		break;
715 	case 3: /* DCHUBP3 */
716 		REG_UPDATE(DOMAIN6_PG_CONFIG,
717 				DOMAIN6_POWER_GATE, power_gate);
718 
719 		REG_WAIT(DOMAIN6_PG_STATUS,
720 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
721 				1, 1000);
722 		break;
723 	default:
724 		BREAK_TO_DEBUGGER();
725 		break;
726 	}
727 }
728 
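/*
 * Power-gate requests are normally kept disabled; briefly assert
 * IP_REQUEST_EN so that the DPP/HUBP power-gate controls below take
 * effect, then de-assert it again.
 */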
729 static void power_on_plane(
730 	struct dce_hwseq *hws,
731 	int plane_id)
732 {
733 	DC_LOGGER_INIT(hws->ctx->logger);
734 	if (REG(DC_IP_REQUEST_CNTL)) {
735 		REG_SET(DC_IP_REQUEST_CNTL, 0,
736 				IP_REQUEST_EN, 1);
737 
738 		if (hws->funcs.dpp_pg_control)
739 			hws->funcs.dpp_pg_control(hws, plane_id, true);
740 
741 		if (hws->funcs.hubp_pg_control)
742 			hws->funcs.hubp_pg_control(hws, plane_id, true);
743 
744 		REG_SET(DC_IP_REQUEST_CNTL, 0,
745 				IP_REQUEST_EN, 0);
746 		DC_LOG_DEBUG(
747 				"Un-gated front end for pipe %d\n", plane_id);
748 	}
749 }
750 
751 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
752 {
753 	struct dce_hwseq *hws = dc->hwseq;
754 	struct hubp *hubp = dc->res_pool->hubps[0];
755 
756 	if (!hws->wa_state.DEGVIDCN10_253_applied)
757 		return;
758 
759 	hubp->funcs->set_blank(hubp, true);
760 
761 	REG_SET(DC_IP_REQUEST_CNTL, 0,
762 			IP_REQUEST_EN, 1);
763 
764 	hws->funcs.hubp_pg_control(hws, 0, false);
765 	REG_SET(DC_IP_REQUEST_CNTL, 0,
766 			IP_REQUEST_EN, 0);
767 
768 	hws->wa_state.DEGVIDCN10_253_applied = false;
769 }
770 
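/*
 * DEGVIDCN10-253 workaround: once every HUBP has been power gated, HUBP0 is
 * un-gated again so that stutter can still be enabled; undo_DEGVIDCN10_253_wa()
 * above reverses this.
 */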
771 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
772 {
773 	struct dce_hwseq *hws = dc->hwseq;
774 	struct hubp *hubp = dc->res_pool->hubps[0];
775 	int i;
776 
777 	if (dc->debug.disable_stutter)
778 		return;
779 
780 	if (!hws->wa.DEGVIDCN10_253)
781 		return;
782 
783 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
784 		if (!dc->res_pool->hubps[i]->power_gated)
785 			return;
786 	}
787 
	/* All pipes are power gated; apply the workaround to enable stutter. */
789 
790 	REG_SET(DC_IP_REQUEST_CNTL, 0,
791 			IP_REQUEST_EN, 1);
792 
793 	hws->funcs.hubp_pg_control(hws, 0, true);
794 	REG_SET(DC_IP_REQUEST_CNTL, 0,
795 			IP_REQUEST_EN, 0);
796 
797 	hubp->funcs->set_hubp_blank_en(hubp, false);
798 	hws->wa_state.DEGVIDCN10_253_applied = true;
799 }
800 
801 void dcn10_bios_golden_init(struct dc *dc)
802 {
803 	struct dce_hwseq *hws = dc->hwseq;
804 	struct dc_bios *bp = dc->ctx->dc_bios;
805 	int i;
806 	bool allow_self_fresh_force_enable = true;
807 
808 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
809 		return;
810 
811 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
812 		allow_self_fresh_force_enable =
813 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
814 
815 
	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
	 * command table. If it was 0 before the command table was called and
	 * changed to 1 afterwards, it should be set back to 0.
	 */
822 
823 	/* initialize dcn global */
824 	bp->funcs->enable_disp_power_gating(bp,
825 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
826 
827 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
828 		/* initialize dcn per pipe */
829 		bp->funcs->enable_disp_power_gating(bp,
830 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
831 	}
832 
833 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
834 		if (allow_self_fresh_force_enable == false &&
835 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
836 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
837 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
838 
839 }
840 
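/*
 * Workaround for false OPTC underflow: wait for all MPCC disconnects on this
 * stream, re-enable OTG blank-data double buffering, and clear the underflow
 * flag only if this sequence itself raised it (a pre-existing underflow is
 * left untouched).
 */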
841 static void false_optc_underflow_wa(
842 		struct dc *dc,
843 		const struct dc_stream_state *stream,
844 		struct timing_generator *tg)
845 {
846 	int i;
847 	bool underflow;
848 
849 	if (!dc->hwseq->wa.false_optc_underflow)
850 		return;
851 
852 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
853 
854 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
855 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
856 
857 		if (old_pipe_ctx->stream != stream)
858 			continue;
859 
860 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
861 	}
862 
863 	if (tg->funcs->set_blank_data_double_buffer)
864 		tg->funcs->set_blank_data_double_buffer(tg, true);
865 
866 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
867 		tg->funcs->clear_optc_underflow(tg);
868 }
869 
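/*
 * Pipes combined through MPC blending or ODM share the same OTG global sync,
 * so the group's VREADY offset must satisfy the pipe with the largest
 * requirement.
 */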
870 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
871 {
872 	struct pipe_ctx *other_pipe;
873 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
874 
875 	/* Always use the largest vready_offset of all connected pipes */
876 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
877 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
878 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
879 	}
880 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
881 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
882 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
883 	}
884 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
885 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
887 	}
888 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
889 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
891 	}
892 
893 	return vready_offset;
894 }
895 
896 enum dc_status dcn10_enable_stream_timing(
897 		struct pipe_ctx *pipe_ctx,
898 		struct dc_state *context,
899 		struct dc *dc)
900 {
901 	struct dc_stream_state *stream = pipe_ctx->stream;
902 	enum dc_color_space color_space;
903 	struct tg_color black_color = {0};
904 
	/* By the caller's loop order, pipe0 is the parent pipe and is handled
	 * first. The back end is set up for pipe0; other child pipes share the
	 * back end with pipe0, so no programming is needed for them.
	 */
909 	if (pipe_ctx->top_pipe != NULL)
910 		return DC_OK;
911 
912 	/* TODO check if timing_changed, disable stream if timing changed */
913 
	/* The HW programming guide assumes the display was already disabled
	 * by the unplug sequence and that the OTG is stopped.
	 */
917 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
918 
919 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
920 			pipe_ctx->clock_source,
921 			&pipe_ctx->stream_res.pix_clk_params,
922 			link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
923 			&pipe_ctx->pll_settings)) {
924 		BREAK_TO_DEBUGGER();
925 		return DC_ERROR_UNEXPECTED;
926 	}
927 
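	/*
	 * For HDMI TMDS the OTG consumes SYMCLK: record the OTG reference and
	 * advance the PHY clock state to SYMCLK_ON while preserving the current
	 * TX on/off state.
	 */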
928 	if (dc_is_hdmi_tmds_signal(stream->signal)) {
929 		stream->link->phy_state.symclk_ref_cnts.otg = 1;
930 		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
931 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
932 		else
933 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
934 	}
935 
936 	pipe_ctx->stream_res.tg->funcs->program_timing(
937 			pipe_ctx->stream_res.tg,
938 			&stream->timing,
939 			calculate_vready_offset_for_group(pipe_ctx),
940 			pipe_ctx->pipe_dlg_param.vstartup_start,
941 			pipe_ctx->pipe_dlg_param.vupdate_offset,
942 			pipe_ctx->pipe_dlg_param.vupdate_width,
943 			pipe_ctx->stream->signal,
944 			true);
945 
946 #if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM, etc. should be done here. */
948 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
949 
950 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
951 
952 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
953 				pipe_ctx->stream_res.opp,
954 				&stream->bit_depth_params,
955 				&stream->clamping);
956 #endif
957 	/* program otg blank color */
958 	color_space = stream->output_color_space;
959 	color_space_to_black_color(dc, color_space, &black_color);
960 
961 	/*
962 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
963 	 * alternate between Cb and Cr, so both channels need the pixel
964 	 * value for Y
965 	 */
966 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
967 		black_color.color_r_cr = black_color.color_g_y;
968 
969 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
970 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
971 				pipe_ctx->stream_res.tg,
972 				&black_color);
973 
974 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
975 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
976 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
977 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
978 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
979 	}
980 
	/* VTG is within the DCHUB command block. DCFCLK is always on. */
982 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
983 		BREAK_TO_DEBUGGER();
984 		return DC_ERROR_UNEXPECTED;
985 	}
986 
987 	/* TODO program crtc source select for non-virtual signal*/
988 	/* TODO program FMT */
989 	/* TODO setup link_enc */
990 	/* TODO set stream attributes */
991 	/* TODO program audio */
992 	/* TODO enable stream if timing changed */
993 	/* TODO unblank stream if DP */
994 
995 	return DC_OK;
996 }
997 
998 static void dcn10_reset_back_end_for_pipe(
999 		struct dc *dc,
1000 		struct pipe_ctx *pipe_ctx,
1001 		struct dc_state *context)
1002 {
1003 	int i;
1004 	struct dc_link *link;
1005 	DC_LOGGER_INIT(dc->ctx->logger);
1006 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1007 		pipe_ctx->stream = NULL;
1008 		return;
1009 	}
1010 
1011 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1012 		link = pipe_ctx->stream->link;
		/* DPMS may already be disabled, or the dpms_off status may be
		 * incorrect due to the fastboot feature. When the system resumes
		 * from S4 with only a second screen, dpms_off would be true even
		 * though VBIOS lit up the eDP, so check the link status too.
		 */
1019 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1020 			link_set_dpms_off(pipe_ctx);
1021 		else if (pipe_ctx->stream_res.audio)
1022 			dc->hwss.disable_audio_stream(pipe_ctx);
1023 
1024 		if (pipe_ctx->stream_res.audio) {
1025 			/*disable az_endpoint*/
1026 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1027 
1028 			/*free audio*/
1029 			if (dc->caps.dynamic_audio == true) {
				/* we have to dynamically arbitrate the audio endpoints */
				/* we free the resource, so is_audio_acquired needs to be reset */
1032 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1033 						pipe_ctx->stream_res.audio, false);
1034 				pipe_ctx->stream_res.audio = NULL;
1035 			}
1036 		}
1037 	}
1038 
	/* By the caller's loop order, the parent pipe (pipe0) is reset last.
	 * The back end is shared by all pipes and is disabled only when the
	 * parent pipe is disabled.
	 */
1043 	if (pipe_ctx->top_pipe == NULL) {
1044 
1045 		if (pipe_ctx->stream_res.abm)
1046 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
1047 
1048 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1049 
1050 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1051 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
1052 			pipe_ctx->stream_res.tg->funcs->set_drr(
1053 					pipe_ctx->stream_res.tg, NULL);
1054 		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
1055 	}
1056 
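	/* Only clear pipe_ctx->stream below for pipes that belong to the current state */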
1057 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1058 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1059 			break;
1060 
1061 	if (i == dc->res_pool->pipe_count)
1062 		return;
1063 
1064 	pipe_ctx->stream = NULL;
1065 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1066 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
1067 }
1068 
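/*
 * Underflow recovery workaround: blank every HUBP, soft-reset DCHUBBUB,
 * toggle HUBP_DISABLE, then unblank, following the register sequence
 * documented inside the function.
 */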
1069 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1070 {
	struct hubp *hubp;
1072 	unsigned int i;
1073 	bool need_recover = true;
1074 
1075 	if (!dc->debug.recovery_enabled)
1076 		return false;
1077 
1078 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1079 		struct pipe_ctx *pipe_ctx =
1080 			&dc->current_state->res_ctx.pipe_ctx[i];
1081 		if (pipe_ctx != NULL) {
1082 			hubp = pipe_ctx->plane_res.hubp;
1083 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1084 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1085 					/* one pipe underflow, we will reset all the pipes*/
1086 					need_recover = true;
1087 				}
1088 			}
1089 		}
1090 	}
1091 	if (!need_recover)
1092 		return false;
1093 	/*
1094 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1095 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1096 	DCHUBP_CNTL:HUBP_DISABLE=1
1097 	DCHUBP_CNTL:HUBP_DISABLE=0
1098 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1099 	DCSURF_PRIMARY_SURFACE_ADDRESS
1100 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1101 	*/
1102 
1103 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1104 		struct pipe_ctx *pipe_ctx =
1105 			&dc->current_state->res_ctx.pipe_ctx[i];
1106 		if (pipe_ctx != NULL) {
1107 			hubp = pipe_ctx->plane_res.hubp;
1108 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1109 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1110 				hubp->funcs->set_hubp_blank_en(hubp, true);
1111 		}
1112 	}
1113 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1114 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1115 
1116 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1117 		struct pipe_ctx *pipe_ctx =
1118 			&dc->current_state->res_ctx.pipe_ctx[i];
1119 		if (pipe_ctx != NULL) {
1120 			hubp = pipe_ctx->plane_res.hubp;
1121 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1122 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1123 				hubp->funcs->hubp_disable_control(hubp, true);
1124 		}
1125 	}
1126 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1127 		struct pipe_ctx *pipe_ctx =
1128 			&dc->current_state->res_ctx.pipe_ctx[i];
1129 		if (pipe_ctx != NULL) {
1130 			hubp = pipe_ctx->plane_res.hubp;
1131 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1132 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
1134 		}
1135 	}
1136 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1137 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1138 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1139 		struct pipe_ctx *pipe_ctx =
1140 			&dc->current_state->res_ctx.pipe_ctx[i];
1141 		if (pipe_ctx != NULL) {
1142 			hubp = pipe_ctx->plane_res.hubp;
1143 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1144 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
1146 		}
1147 	}
1148 	return true;
1149 
1150 }
1151 
1152 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1153 {
1154 	struct hubbub *hubbub = dc->res_pool->hubbub;
1155 	static bool should_log_hw_state; /* prevent hw state log by default */
1156 
1157 	if (!hubbub->funcs->verify_allow_pstate_change_high)
1158 		return;
1159 
1160 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1161 		int i = 0;
1162 
1163 		if (should_log_hw_state)
1164 			dcn10_log_hw_state(dc, NULL);
1165 
1166 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1167 		BREAK_TO_DEBUGGER();
1168 		if (dcn10_hw_wa_force_recovery(dc)) {
1169 			/*check again*/
1170 			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1171 				BREAK_TO_DEBUGGER();
1172 		}
1173 	}
1174 }
1175 
1176 /* trigger HW to start disconnect plane from stream on the next vsync */
1177 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1178 {
1179 	struct dce_hwseq *hws = dc->hwseq;
1180 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1181 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1182 	struct mpc *mpc = dc->res_pool->mpc;
1183 	struct mpc_tree *mpc_tree_params;
1184 	struct mpcc *mpcc_to_remove = NULL;
1185 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1186 
1187 	mpc_tree_params = &(opp->mpc_tree_params);
1188 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1189 
1190 	/*Already reset*/
1191 	if (mpcc_to_remove == NULL)
1192 		return;
1193 
1194 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have the OTG disabled by default, so MPCC_STATUS will never assert idle;
	// don't wait for MPCC_IDLE in the programming sequence.
1197 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1198 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1199 
1200 	dc->optimized_required = true;
1201 
1202 	if (hubp->funcs->hubp_disconnect)
1203 		hubp->funcs->hubp_disconnect(hubp);
1204 
1205 	if (dc->debug.sanity_checks)
1206 		hws->funcs.verify_allow_pstate_change_high(dc);
1207 }
1208 
1209 /**
1210  * dcn10_plane_atomic_power_down - Power down plane components.
1211  *
 * @dc: dc struct reference, used to grab the hwseq.
1213  * @dpp: dpp struct reference.
1214  * @hubp: hubp struct reference.
1215  *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests to switch the power gate are precisely controlled to
 * avoid problems. For this reason, the power gate request is normally kept
 * disabled. This function first enables the power gate request, then powers
 * down the DPP and HUBP, and finally disables the power gate request again.
1221  */
1222 void dcn10_plane_atomic_power_down(struct dc *dc,
1223 		struct dpp *dpp,
1224 		struct hubp *hubp)
1225 {
1226 	struct dce_hwseq *hws = dc->hwseq;
1227 	DC_LOGGER_INIT(dc->ctx->logger);
1228 
1229 	if (REG(DC_IP_REQUEST_CNTL)) {
1230 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1231 				IP_REQUEST_EN, 1);
1232 
1233 		if (hws->funcs.dpp_pg_control)
1234 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1235 
1236 		if (hws->funcs.hubp_pg_control)
1237 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1238 
1239 		dpp->funcs->dpp_reset(dpp);
1240 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1241 				IP_REQUEST_EN, 0);
1242 		DC_LOG_DEBUG(
1243 				"Power gated front end %d\n", hubp->inst);
1244 	}
1245 }
1246 
1247 /* disable HW used by plane.
1248  * note:  cannot disable until disconnect is complete
1249  */
1250 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1251 {
1252 	struct dce_hwseq *hws = dc->hwseq;
1253 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1254 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1255 	int opp_id = hubp->opp_id;
1256 
1257 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1258 
1259 	hubp->funcs->hubp_clk_cntl(hubp, false);
1260 
1261 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1262 
1263 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1264 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1265 				pipe_ctx->stream_res.opp,
1266 				false);
1267 
1268 	hubp->power_gated = true;
1269 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1270 
1271 	hws->funcs.plane_atomic_power_down(dc,
1272 			pipe_ctx->plane_res.dpp,
1273 			pipe_ctx->plane_res.hubp);
1274 
1275 	pipe_ctx->stream = NULL;
1276 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1277 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1278 	pipe_ctx->top_pipe = NULL;
1279 	pipe_ctx->bottom_pipe = NULL;
1280 	pipe_ctx->plane_state = NULL;
1281 }
1282 
1283 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1284 {
1285 	struct dce_hwseq *hws = dc->hwseq;
1286 	DC_LOGGER_INIT(dc->ctx->logger);
1287 
1288 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1289 		return;
1290 
1291 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1292 
1293 	apply_DEGVIDCN10_253_wa(dc);
1294 
1295 	DC_LOG_DC("Power down front end %d\n",
1296 					pipe_ctx->pipe_idx);
1297 }
1298 
1299 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1300 {
1301 	int i;
1302 	struct dce_hwseq *hws = dc->hwseq;
1303 	struct hubbub *hubbub = dc->res_pool->hubbub;
1304 	bool can_apply_seamless_boot = false;
1305 
1306 	for (i = 0; i < context->stream_count; i++) {
1307 		if (context->streams[i]->apply_seamless_boot_optimization) {
1308 			can_apply_seamless_boot = true;
1309 			break;
1310 		}
1311 	}
1312 
1313 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1314 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1315 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1316 
		/* We assume pipe_ctx is not mapped irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so don't disable it.
		 */
1321 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1322 			continue;
1323 
1324 		/* Blank controller using driver code instead of
1325 		 * command table.
1326 		 */
1327 		if (tg->funcs->is_tg_enabled(tg)) {
1328 			if (hws->funcs.init_blank != NULL) {
1329 				hws->funcs.init_blank(dc, tg);
1330 				tg->funcs->lock(tg);
1331 			} else {
1332 				tg->funcs->lock(tg);
1333 				tg->funcs->set_blank(tg, true);
1334 				hwss_wait_for_blank_complete(tg);
1335 			}
1336 		}
1337 	}
1338 
1339 	/* Reset det size */
1340 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1341 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1342 		struct hubp *hubp = dc->res_pool->hubps[i];
1343 
1344 		/* Do not need to reset for seamless boot */
1345 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1346 			continue;
1347 
1348 		if (hubbub && hubp) {
1349 			if (hubbub->funcs->program_det_size)
1350 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1351 		}
1352 	}
1353 
	/* num_opp will be equal to the number of MPCCs */
1355 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1356 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1357 
1358 		/* Cannot reset the MPC mux if seamless boot */
1359 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1360 			continue;
1361 
1362 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1363 				dc->res_pool->mpc, i);
1364 	}
1365 
1366 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1367 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1368 		struct hubp *hubp = dc->res_pool->hubps[i];
1369 		struct dpp *dpp = dc->res_pool->dpps[i];
1370 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1371 
		/* We assume pipe_ctx is not mapped irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so don't disable it.
		 */
1376 		if (can_apply_seamless_boot &&
1377 			pipe_ctx->stream != NULL &&
1378 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1379 				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK whether or not
			// seamless boot is enabled, to suppress global sync signals
			// when the OTG is blanked. This prevents the pipe from
			// requesting data while in PSR.
1384 			tg->funcs->tg_init(tg);
1385 			hubp->power_gated = true;
1386 			continue;
1387 		}
1388 
1389 		/* Disable on the current state so the new one isn't cleared. */
1390 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1391 
1392 		dpp->funcs->dpp_reset(dpp);
1393 
1394 		pipe_ctx->stream_res.tg = tg;
1395 		pipe_ctx->pipe_idx = i;
1396 
1397 		pipe_ctx->plane_res.hubp = hubp;
1398 		pipe_ctx->plane_res.dpp = dpp;
1399 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1400 		hubp->mpcc_id = dpp->inst;
1401 		hubp->opp_id = OPP_ID_INVALID;
1402 		hubp->power_gated = false;
1403 
1404 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1405 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1406 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1407 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1408 
1409 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1410 
1411 		if (tg->funcs->is_tg_enabled(tg))
1412 			tg->funcs->unlock(tg);
1413 
1414 		dc->hwss.disable_plane(dc, pipe_ctx);
1415 
1416 		pipe_ctx->stream_res.tg = NULL;
1417 		pipe_ctx->plane_res.hubp = NULL;
1418 
1419 		if (tg->funcs->is_tg_enabled(tg)) {
1420 			if (tg->funcs->init_odm)
1421 				tg->funcs->init_odm(tg);
1422 		}
1423 
1424 		tg->funcs->tg_init(tg);
1425 	}
1426 
1427 	/* Power gate DSCs */
1428 	if (hws->funcs.dsc_pg_control != NULL) {
1429 		uint32_t num_opps = 0;
1430 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1431 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1432 
		// Step 1: Find out which OPTC is running and has its DSC enabled.
		// We can't use res_pool->res_cap->num_timing_generator to check,
		// because it records the default display-pipe count built into the
		// driver, not the display pipes of the current chip. Some ASICs are
		// fused to fewer display pipes than the default; the driver obtains
		// the real count in the dcnxx_resource_construct function.
1439 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1440 			uint32_t optc_dsc_state = 0;
1441 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1442 
1443 			if (tg->funcs->is_tg_enabled(tg)) {
1444 				if (tg->funcs->get_dsc_status)
1445 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC can have DSC enabled, so exit this loop
				// once one is found (a non-zero value means DSC is enabled).
1448 				if (optc_dsc_state != 0) {
1449 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1450 					break;
1451 				}
1452 			}
1453 		}
1454 
		// Step 2: Power down the DSCs, but skip the DSC of the running OPTC.
1456 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1457 			struct dcn_dsc_state s  = {0};
1458 
1459 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1460 
1461 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1462 				s.dsc_clock_en && s.dsc_fw_en)
1463 				continue;
1464 
1465 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1466 		}
1467 	}
1468 }
1469 
1470 void dcn10_init_hw(struct dc *dc)
1471 {
1472 	int i;
1473 	struct abm *abm = dc->res_pool->abm;
1474 	struct dmcu *dmcu = dc->res_pool->dmcu;
1475 	struct dce_hwseq *hws = dc->hwseq;
1476 	struct dc_bios *dcb = dc->ctx->dc_bios;
1477 	struct resource_pool *res_pool = dc->res_pool;
1478 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1479 	bool   is_optimized_init_done = false;
1480 
1481 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1482 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1483 
	/* Align the bw context with the hw config when the system resumes. */
1485 	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1486 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1487 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1488 	}
1489 
1490 	// Initialize the dccg
1491 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1492 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1493 
1494 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1495 
1496 		REG_WRITE(REFCLK_CNTL, 0);
1497 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1498 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1499 
1500 		if (!dc->debug.disable_clock_gate) {
1501 			/* enable all DCN clock gating */
1502 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1503 
1504 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1505 
1506 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1507 		}
1508 
		// Enable the ability to power gate / don't force power on permanently
1510 		if (hws->funcs.enable_power_gating_plane)
1511 			hws->funcs.enable_power_gating_plane(hws, true);
1512 
1513 		return;
1514 	}
1515 
1516 	if (!dcb->funcs->is_accelerated_mode(dcb))
1517 		hws->funcs.disable_vga(dc->hwseq);
1518 
1519 	hws->funcs.bios_golden_init(dc);
1520 
1521 	if (dc->ctx->dc_bios->fw_info_valid) {
1522 		res_pool->ref_clocks.xtalin_clock_inKhz =
1523 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1524 
1525 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1526 			if (res_pool->dccg && res_pool->hubbub) {
1527 
1528 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1529 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1530 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1531 
1532 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1533 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1534 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1535 			} else {
1536 				// Not all ASICs have DCCG sw component
1537 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1538 						res_pool->ref_clocks.xtalin_clock_inKhz;
1539 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1540 						res_pool->ref_clocks.xtalin_clock_inKhz;
1541 			}
1542 		}
1543 	} else
1544 		ASSERT_CRITICAL(false);
1545 
1546 	for (i = 0; i < dc->link_count; i++) {
1547 		/* Power up AND update implementation according to the
1548 		 * required signal (which may be different from the
1549 		 * default signal on connector).
1550 		 */
1551 		struct dc_link *link = dc->links[i];
1552 
1553 		if (!is_optimized_init_done)
1554 			link->link_enc->funcs->hw_init(link->link_enc);
1555 
1556 		/* Check for enabled DIG to identify enabled display */
1557 		if (link->link_enc->funcs->is_dig_enabled &&
1558 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1559 			link->link_status.link_active = true;
1560 			if (link->link_enc->funcs->fec_is_active &&
1561 					link->link_enc->funcs->fec_is_active(link->link_enc))
1562 				link->fec_state = dc_link_fec_enabled;
1563 		}
1564 	}
1565 
1566 	/* we want to turn off all dp displays before doing detection */
1567 	link_blank_all_dp_displays(dc);
1568 
1569 	if (hws->funcs.enable_power_gating_plane)
1570 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1571 
1572 	/* If taking control over from VBIOS, we may want to optimize our first
1573 	 * mode set, so we need to skip powering down pipes until we know which
1574 	 * pipes we want to use.
1575 	 * Otherwise, if taking control is not possible, we need to power
1576 	 * everything down.
1577 	 */
1578 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1579 		if (!is_optimized_init_done) {
1580 			hws->funcs.init_pipes(dc, dc->current_state);
1581 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1582 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1583 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1584 		}
1585 	}
1586 
1587 	if (!is_optimized_init_done) {
1588 
1589 		for (i = 0; i < res_pool->audio_count; i++) {
1590 			struct audio *audio = res_pool->audios[i];
1591 
1592 			audio->funcs->hw_init(audio);
1593 		}
1594 
1595 		for (i = 0; i < dc->link_count; i++) {
1596 			struct dc_link *link = dc->links[i];
1597 
1598 			if (link->panel_cntl)
1599 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1600 		}
1601 
1602 		if (abm != NULL)
1603 			abm->funcs->abm_init(abm, backlight);
1604 
1605 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1606 			dmcu->funcs->dmcu_init(dmcu);
1607 	}
1608 
1609 	if (abm != NULL && dmcu != NULL)
1610 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1611 
	/* Power up AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1613 	if (!is_optimized_init_done)
1614 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1615 
1616 	if (!dc->debug.disable_clock_gate) {
1617 		/* enable all DCN clock gating */
1618 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1619 
1620 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1621 
1622 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1623 	}
1624 
1625 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1626 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1627 }
1628 
/* In headless boot cases, DIG may be turned
 * on, which causes HW/SW discrepancies.
 * To avoid this, power down the hardware on boot
 * if DIG is turned on.
 */
1634 void dcn10_power_down_on_boot(struct dc *dc)
1635 {
1636 	struct dc_link *edp_links[MAX_NUM_EDP];
1637 	struct dc_link *edp_link = NULL;
1638 	int edp_num;
1639 	int i = 0;
1640 
1641 	get_edp_links(dc, edp_links, &edp_num);
1642 	if (edp_num)
1643 		edp_link = edp_links[0];
1644 
1645 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1646 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1647 			dc->hwseq->funcs.edp_backlight_control &&
1648 			dc->hwss.power_down &&
1649 			dc->hwss.edp_power_control) {
1650 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1651 		dc->hwss.power_down(dc);
1652 		dc->hwss.edp_power_control(edp_link, false);
1653 	} else {
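		/* No eDP with an active DIG: power down if any other link has
		 * its DIG enabled.
		 */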
1654 		for (i = 0; i < dc->link_count; i++) {
1655 			struct dc_link *link = dc->links[i];
1656 
1657 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1658 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1659 					dc->hwss.power_down) {
1660 				dc->hwss.power_down(dc);
1661 				break;
1662 			}
1663 
1664 		}
1665 	}
1666 
1667 	/*
1668 	 * Call update_clocks with empty context
1669 	 * to send DISPLAY_OFF
1670 	 * Otherwise DISPLAY_OFF may not be asserted
1671 	 */
1672 	if (dc->clk_mgr->funcs->set_low_power_state)
1673 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1674 }
1675 
1676 void dcn10_reset_hw_ctx_wrap(
1677 		struct dc *dc,
1678 		struct dc_state *context)
1679 {
1680 	int i;
1681 	struct dce_hwseq *hws = dc->hwseq;
1682 
1683 	/* Reset Back End*/
1684 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1685 		struct pipe_ctx *pipe_ctx_old =
1686 			&dc->current_state->res_ctx.pipe_ctx[i];
1687 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1688 
1689 		if (!pipe_ctx_old->stream)
1690 			continue;
1691 
1692 		if (pipe_ctx_old->top_pipe)
1693 			continue;
1694 
1695 		if (!pipe_ctx->stream ||
1696 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1697 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1698 
1699 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1700 			if (hws->funcs.enable_stream_gating)
1701 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1702 			if (old_clk)
1703 				old_clk->funcs->cs_power_down(old_clk);
1704 		}
1705 	}
1706 }
1707 
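/*
 * For side-by-side / top-and-bottom stereo on the secondary split pipe,
 * temporarily swap the right-eye address into left_addr so the second pipe
 * scans out the right eye; the caller restores the original left address
 * afterwards. Returns true when such a swap was performed. Otherwise, if the
 * stream is 3D but the plane address is not a stereo address, promote it and
 * mirror the left addresses into the right.
 */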
1708 static bool patch_address_for_sbs_tb_stereo(
1709 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1710 {
1711 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1712 	bool sec_split = pipe_ctx->top_pipe &&
1713 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1714 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1715 		(pipe_ctx->stream->timing.timing_3d_format ==
1716 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1717 		 pipe_ctx->stream->timing.timing_3d_format ==
1718 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1719 		*addr = plane_state->address.grph_stereo.left_addr;
1720 		plane_state->address.grph_stereo.left_addr =
1721 		plane_state->address.grph_stereo.right_addr;
1722 		return true;
1723 	} else {
1724 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1725 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1726 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1727 			plane_state->address.grph_stereo.right_addr =
1728 			plane_state->address.grph_stereo.left_addr;
1729 			plane_state->address.grph_stereo.right_meta_addr =
1730 			plane_state->address.grph_stereo.left_meta_addr;
1731 		}
1732 	}
1733 	return false;
1734 }
1735 
1736 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1737 {
1738 	bool addr_patched = false;
1739 	PHYSICAL_ADDRESS_LOC addr;
1740 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1741 
1742 	if (plane_state == NULL)
1743 		return;
1744 
1745 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1746 
1747 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1748 			pipe_ctx->plane_res.hubp,
1749 			&plane_state->address,
1750 			plane_state->flip_immediate);
1751 
1752 	plane_state->status.requested_address = plane_state->address;
1753 
1754 	if (plane_state->flip_immediate)
1755 		plane_state->status.current_address = plane_state->address;
1756 
1757 	if (addr_patched)
1758 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1759 }
1760 
1761 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1762 			const struct dc_plane_state *plane_state)
1763 {
1764 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1765 	const struct dc_transfer_func *tf = NULL;
1766 	bool result = true;
1767 
1768 	if (dpp_base == NULL)
1769 		return false;
1770 
1771 	if (plane_state->in_transfer_func)
1772 		tf = plane_state->in_transfer_func;
1773 
1774 	if (plane_state->gamma_correction &&
1775 		!dpp_base->ctx->dc->debug.always_use_regamma
1776 		&& !plane_state->gamma_correction->is_identity
1777 			&& dce_use_lut(plane_state->format))
1778 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1779 
1780 	if (tf == NULL)
1781 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1782 	else if (tf->type == TF_TYPE_PREDEFINED) {
1783 		switch (tf->tf) {
1784 		case TRANSFER_FUNCTION_SRGB:
1785 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1786 			break;
1787 		case TRANSFER_FUNCTION_BT709:
1788 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1789 			break;
1790 		case TRANSFER_FUNCTION_LINEAR:
1791 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1792 			break;
1793 		case TRANSFER_FUNCTION_PQ:
1794 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1795 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1796 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1797 			result = true;
1798 			break;
1799 		default:
1800 			result = false;
1801 			break;
1802 		}
1803 	} else if (tf->type == TF_TYPE_BYPASS) {
1804 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1805 	} else {
1806 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1807 					&dpp_base->degamma_params);
1808 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1809 				&dpp_base->degamma_params);
1810 		result = true;
1811 	}
1812 
1813 	return result;
1814 }
1815 
1816 #define MAX_NUM_HW_POINTS 0x200
1817 
1818 static void log_tf(struct dc_context *ctx,
1819 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1820 {
1821 	// DC_LOG_GAMMA is default logging of all hw points
1822 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1823 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1824 	int i = 0;
1825 
1826 	DC_LOGGER_INIT(ctx->logger);
1827 	DC_LOG_GAMMA("Gamma Correction TF");
1828 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1829 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1830 
1831 	for (i = 0; i < hw_points_num; i++) {
1832 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1833 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1834 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1835 	}
1836 
1837 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1838 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1839 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1840 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1841 	}
1842 }
1843 
1844 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1845 				const struct dc_stream_state *stream)
1846 {
1847 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1848 
1849 	if (dpp == NULL)
1850 		return false;
1851 
1852 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1853 
1854 	if (stream->out_transfer_func &&
1855 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1856 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1857 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1858 
	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it
	 * on a full update.
	 */
1862 	else if (cm_helper_translate_curve_to_hw_format(
1863 			stream->out_transfer_func,
1864 			&dpp->regamma_params, false)) {
1865 		dpp->funcs->dpp_program_regamma_pwl(
1866 				dpp,
1867 				&dpp->regamma_params, OPP_REGAMMA_USER);
1868 	} else
1869 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1870 
1871 	if (stream != NULL && stream->ctx != NULL &&
1872 			stream->out_transfer_func != NULL) {
1873 		log_tf(stream->ctx,
1874 				stream->out_transfer_func,
1875 				dpp->regamma_params.hw_points_num);
1876 	}
1877 
1878 	return true;
1879 }
1880 
1881 void dcn10_pipe_control_lock(
1882 	struct dc *dc,
1883 	struct pipe_ctx *pipe,
1884 	bool lock)
1885 {
1886 	struct dce_hwseq *hws = dc->hwseq;
1887 
	/* Use the TG master update lock to lock everything on the TG,
	 * therefore only the top pipe needs to take the lock.
	 */
1891 	if (!pipe || pipe->top_pipe)
1892 		return;
1893 
1894 	if (dc->debug.sanity_checks)
1895 		hws->funcs.verify_allow_pstate_change_high(dc);
1896 
1897 	if (lock)
1898 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1899 	else
1900 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1901 
1902 	if (dc->debug.sanity_checks)
1903 		hws->funcs.verify_allow_pstate_change_high(dc);
1904 }
1905 
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 * @dc: DC object.
 * @pipe_ctx: Pipe whose cursor is being updated.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely, or old values from being retained when the
 * viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 *       to avoid the need for this workaround.
 */
1919 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1920 {
1921 	struct dc_stream_state *stream = pipe_ctx->stream;
1922 	struct crtc_position position;
1923 	uint32_t vupdate_start, vupdate_end;
1924 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1925 	unsigned int us_per_line, us_vupdate;
1926 
1927 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1928 		return;
1929 
1930 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1931 		return;
1932 
1933 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1934 				       &vupdate_end);
1935 
1936 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1937 	vpos = position.vertical_count;
1938 
1939 	/* Avoid wraparound calculation issues */
1940 	vupdate_start += stream->timing.v_total;
1941 	vupdate_end += stream->timing.v_total;
1942 	vpos += stream->timing.v_total;
1943 
1944 	if (vpos <= vupdate_start) {
1945 		/* VPOS is in VACTIVE or back porch. */
1946 		lines_to_vupdate = vupdate_start - vpos;
1947 	} else if (vpos > vupdate_end) {
1948 		/* VPOS is in the front porch. */
1949 		return;
1950 	} else {
1951 		/* VPOS is in VUPDATE. */
1952 		lines_to_vupdate = 0;
1953 	}
1954 
1955 	/* Calculate time until VUPDATE in microseconds. */
1956 	us_per_line =
1957 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1958 	us_to_vupdate = lines_to_vupdate * us_per_line;
1959 
	/* 70 us is a conservative estimate of cursor update time. */
1961 	if (us_to_vupdate > 70)
1962 		return;
1963 
1964 	/* Stall out until the cursor update completes. */
1965 	if (vupdate_end < vupdate_start)
1966 		vupdate_end += stream->timing.v_total;
1967 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1968 	udelay(us_to_vupdate + us_vupdate);
1969 }
1970 
1971 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1972 {
1973 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1974 	if (!pipe || pipe->top_pipe)
1975 		return;
1976 
1977 	/* Prevent cursor lock from stalling out cursor updates. */
1978 	if (lock)
1979 		delay_cursor_until_vupdate(dc, pipe);
1980 
1981 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1982 		union dmub_hw_lock_flags hw_locks = { 0 };
1983 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1984 
1985 		hw_locks.bits.lock_cursor = 1;
1986 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1987 
1988 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1989 					lock,
1990 					&hw_locks,
1991 					&inst_flags);
1992 	} else
1993 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1994 				pipe->stream_res.opp->inst, lock);
1995 }
1996 
1997 static bool wait_for_reset_trigger_to_occur(
1998 	struct dc_context *dc_ctx,
1999 	struct timing_generator *tg)
2000 {
2001 	bool rc = false;
2002 
	/*
	 * To avoid an endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur.
	 */
2005 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2006 	int i;
2007 
2008 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2009 
2010 		if (!tg->funcs->is_counter_moving(tg)) {
2011 			DC_ERROR("TG counter is not moving!\n");
2012 			break;
2013 		}
2014 
2015 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2016 			rc = true;
2017 			/* usually occurs at i=1 */
2018 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2019 					i);
2020 			break;
2021 		}
2022 
2023 		/* Wait for one frame. */
2024 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2025 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2026 	}
2027 
	if (!rc)
2029 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2030 
2031 	return rc;
2032 }
2033 
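/*
 * Reduce the fraction numerator/denominator by repeatedly dividing both by
 * common prime factors from the table below. When checkUint32Boundary is set,
 * stop once both values fit in 32 bits and return whether that was achieved;
 * otherwise always return true. For example, 600/5000 reduces to 3/25.
 */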
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Boundary)
2037 {
2038 	int i;
	bool ret = !checkUint32Boundary;
2040 	uint64_t max_int32 = 0xffffffff;
2041 	uint64_t num, denom;
2042 	static const uint16_t prime_numbers[] = {
2043 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2044 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2045 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2046 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2047 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2048 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2049 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2050 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2051 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2052 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2053 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2054 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2055 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2056 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2057 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2058 	int count = ARRAY_SIZE(prime_numbers);
2059 
2060 	num = *numerator;
2061 	denom = *denominator;
2062 	for (i = 0; i < count; i++) {
2063 		uint32_t num_remainder, denom_remainder;
2064 		uint64_t num_result, denom_result;
		if (checkUint32Boundary &&
			num <= max_int32 && denom <= max_int32) {
2067 			ret = true;
2068 			break;
2069 		}
2070 		do {
2071 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2072 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2073 			if (num_remainder == 0 && denom_remainder == 0) {
2074 				num = num_result;
2075 				denom = denom_result;
2076 			}
2077 		} while (num_remainder == 0 && denom_remainder == 0);
2078 	}
2079 	*numerator = num;
2080 	*denominator = denom;
2081 	return ret;
2082 }
2083 
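/* Refresh rate in Hz is pix_clk_100hz * 100 / (h_total * v_total); "low" means 30 Hz or less. */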
2084 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2085 {
2086 	uint32_t master_pipe_refresh_rate =
2087 		pipe->stream->timing.pix_clk_100hz * 100 /
2088 		pipe->stream->timing.h_total /
2089 		pipe->stream->timing.v_total;
2090 	return master_pipe_refresh_rate <= 30;
2091 }
2092 
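/*
 * Effective pixel clock divider used for DTO programming: doubled for low
 * refresh rates (when requested) and for YCbCr 4:2:0, then multiplied by the
 * number of ODM-combined pipes in the chain.
 */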
2093 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2094 				 bool account_low_refresh_rate)
2095 {
2096 	uint32_t clock_divider = 1;
2097 	uint32_t numpipes = 1;
2098 
2099 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2100 		clock_divider *= 2;
2101 
2102 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2103 		clock_divider *= 2;
2104 
2105 	while (pipe->next_odm_pipe) {
2106 		pipe = pipe->next_odm_pipe;
2107 		numpipes++;
2108 	}
2109 	clock_divider *= numpipes;
2110 
2111 	return clock_divider;
2112 }
2113 
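/*
 * Align the DP pipes' pixel clock DTOs to the embedded (eDP) panel described
 * by dc->config.vblank_alignment_dto_params so that the frame durations in
 * the group match. Returns the index of the chosen master pipe, or -1 if no
 * pipe could be aligned.
 */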
2114 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2115 				    struct pipe_ctx *grouped_pipes[])
2116 {
2117 	struct dc_context *dc_ctx = dc->ctx;
2118 	int i, master = -1, embedded = -1;
2119 	struct dc_crtc_timing *hw_crtc_timing;
2120 	uint64_t phase[MAX_PIPES];
2121 	uint64_t modulo[MAX_PIPES];
2122 	unsigned int pclk;
2123 
2124 	uint32_t embedded_pix_clk_100hz;
2125 	uint16_t embedded_h_total;
2126 	uint16_t embedded_v_total;
2127 	uint32_t dp_ref_clk_100hz =
2128 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2129 
2130 	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2131 	if (!hw_crtc_timing)
2132 		return master;
2133 
2134 	if (dc->config.vblank_alignment_dto_params &&
2135 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2136 		embedded_h_total =
2137 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2138 		embedded_v_total =
2139 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2140 		embedded_pix_clk_100hz =
2141 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2142 
2143 		for (i = 0; i < group_size; i++) {
2144 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2145 					grouped_pipes[i]->stream_res.tg,
2146 					&hw_crtc_timing[i]);
2147 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2148 				dc->res_pool->dp_clock_source,
2149 				grouped_pipes[i]->stream_res.tg->inst,
2150 				&pclk);
2151 			hw_crtc_timing[i].pix_clk_100hz = pclk;
2152 			if (dc_is_embedded_signal(
2153 					grouped_pipes[i]->stream->signal)) {
2154 				embedded = i;
2155 				master = i;
2156 				phase[i] = embedded_pix_clk_100hz*100;
2157 				modulo[i] = dp_ref_clk_100hz*100;
2158 			} else {
2159 
2160 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
2161 					hw_crtc_timing[i].h_total*
2162 					hw_crtc_timing[i].v_total;
2163 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2164 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
2165 					embedded_h_total*
2166 					embedded_v_total;
2167 
2168 				if (reduceSizeAndFraction(&phase[i],
2169 						&modulo[i], true) == false) {
					/*
					 * This will stop this timing from being
					 * reported as synchronizable.
					 */
2174 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2175 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2176 				}
2177 			}
2178 		}
2179 
2180 		for (i = 0; i < group_size; i++) {
2181 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2182 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2183 					dc->res_pool->dp_clock_source,
2184 					grouped_pipes[i]->stream_res.tg->inst,
2185 					phase[i], modulo[i]);
2186 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2187 					dc->res_pool->dp_clock_source,
2188 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2189 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2190 					pclk*get_clock_divider(grouped_pipes[i], false);
2191 				if (master == -1)
2192 					master = i;
2193 			}
2194 		}
2195 
2196 	}
2197 
2198 	kfree(hw_crtc_timing);
2199 	return master;
2200 }
2201 
2202 void dcn10_enable_vblanks_synchronization(
2203 	struct dc *dc,
2204 	int group_index,
2205 	int group_size,
2206 	struct pipe_ctx *grouped_pipes[])
2207 {
2208 	struct dc_context *dc_ctx = dc->ctx;
2209 	struct output_pixel_processor *opp;
2210 	struct timing_generator *tg;
2211 	int i, width, height, master;
2212 
2213 	for (i = 1; i < group_size; i++) {
2214 		opp = grouped_pipes[i]->stream_res.opp;
2215 		tg = grouped_pipes[i]->stream_res.tg;
2216 		tg->funcs->get_otg_active_size(tg, &width, &height);
2217 
2218 		if (!tg->funcs->is_tg_enabled(tg)) {
2219 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2220 			return;
2221 		}
2222 
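		/* Temporarily oversize the DPG region (2*height + 1) while the
		 * OTGs are re-aligned; the real active size is restored at the
		 * end of this function. The oversized value appears to be
		 * required by the alignment sequence.
		 */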
2223 		if (opp->funcs->opp_program_dpg_dimensions)
2224 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2225 	}
2226 
2227 	for (i = 0; i < group_size; i++) {
2228 		if (grouped_pipes[i]->stream == NULL)
2229 			continue;
2230 		grouped_pipes[i]->stream->vblank_synchronized = false;
2231 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2232 	}
2233 
2234 	DC_SYNC_INFO("Aligning DP DTOs\n");
2235 
2236 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2237 
2238 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2239 
2240 	if (master >= 0) {
2241 		for (i = 0; i < group_size; i++) {
2242 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2243 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2244 					grouped_pipes[master]->stream_res.tg,
2245 					grouped_pipes[i]->stream_res.tg,
2246 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2247 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2248 					get_clock_divider(grouped_pipes[master], false),
2249 					get_clock_divider(grouped_pipes[i], false));
2250 			grouped_pipes[i]->stream->vblank_synchronized = true;
2251 		}
2252 		grouped_pipes[master]->stream->vblank_synchronized = true;
2253 		DC_SYNC_INFO("Sync complete\n");
2254 	}
2255 
2256 	for (i = 1; i < group_size; i++) {
2257 		opp = grouped_pipes[i]->stream_res.opp;
2258 		tg = grouped_pipes[i]->stream_res.tg;
2259 		tg->funcs->get_otg_active_size(tg, &width, &height);
2260 		if (opp->funcs->opp_program_dpg_dimensions)
2261 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2262 	}
2263 }
2264 
2265 void dcn10_enable_timing_synchronization(
2266 	struct dc *dc,
2267 	int group_index,
2268 	int group_size,
2269 	struct pipe_ctx *grouped_pipes[])
2270 {
2271 	struct dc_context *dc_ctx = dc->ctx;
2272 	struct output_pixel_processor *opp;
2273 	struct timing_generator *tg;
2274 	int i, width, height;
2275 
2276 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2277 
2278 	for (i = 1; i < group_size; i++) {
2279 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2280 			continue;
2281 
2282 		opp = grouped_pipes[i]->stream_res.opp;
2283 		tg = grouped_pipes[i]->stream_res.tg;
2284 		tg->funcs->get_otg_active_size(tg, &width, &height);
2285 
2286 		if (!tg->funcs->is_tg_enabled(tg)) {
2287 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2288 			return;
2289 		}
2290 
2291 		if (opp->funcs->opp_program_dpg_dimensions)
2292 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2293 	}
2294 
2295 	for (i = 0; i < group_size; i++) {
2296 		if (grouped_pipes[i]->stream == NULL)
2297 			continue;
2298 
2299 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2300 			continue;
2301 
2302 		grouped_pipes[i]->stream->vblank_synchronized = false;
2303 	}
2304 
2305 	for (i = 1; i < group_size; i++) {
2306 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2307 			continue;
2308 
2309 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2310 				grouped_pipes[i]->stream_res.tg,
2311 				grouped_pipes[0]->stream_res.tg->inst);
2312 	}
2313 
2314 	DC_SYNC_INFO("Waiting for trigger\n");
2315 
	/* Only one pipe needs to be checked for the reset having occurred,
	 * as all the others are synchronized to it. Look at the last pipe
	 * programmed to reset.
	 */
2319 
2320 	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2321 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2322 
2323 	for (i = 1; i < group_size; i++) {
2324 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2325 			continue;
2326 
2327 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2328 				grouped_pipes[i]->stream_res.tg);
2329 	}
2330 
2331 	for (i = 1; i < group_size; i++) {
2332 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2333 			continue;
2334 
2335 		opp = grouped_pipes[i]->stream_res.opp;
2336 		tg = grouped_pipes[i]->stream_res.tg;
2337 		tg->funcs->get_otg_active_size(tg, &width, &height);
2338 		if (opp->funcs->opp_program_dpg_dimensions)
2339 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2340 	}
2341 
2342 	DC_SYNC_INFO("Sync complete\n");
2343 }
2344 
2345 void dcn10_enable_per_frame_crtc_position_reset(
2346 	struct dc *dc,
2347 	int group_size,
2348 	struct pipe_ctx *grouped_pipes[])
2349 {
2350 	struct dc_context *dc_ctx = dc->ctx;
2351 	int i;
2352 
2353 	DC_SYNC_INFO("Setting up\n");
2354 	for (i = 0; i < group_size; i++)
2355 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2356 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2357 					grouped_pipes[i]->stream_res.tg,
2358 					0,
2359 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2360 
2361 	DC_SYNC_INFO("Waiting for trigger\n");
2362 
2363 	for (i = 0; i < group_size; i++)
2364 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2365 
2366 	DC_SYNC_INFO("Multi-display sync is complete\n");
2367 }
2368 
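/*
 * Read the current system aperture configuration from the MMHUB registers.
 * The default physical page number is in 4 KiB units (hence the << 12) and
 * the aperture low/high logical addresses are stored in 256 KiB units
 * (hence the << 18).
 */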
2369 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2370 		struct vm_system_aperture_param *apt,
2371 		struct dce_hwseq *hws)
2372 {
2373 	PHYSICAL_ADDRESS_LOC physical_page_number;
2374 	uint32_t logical_addr_low;
2375 	uint32_t logical_addr_high;
2376 
2377 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2378 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2379 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2380 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2381 
2382 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2383 			LOGICAL_ADDR, &logical_addr_low);
2384 
2385 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2386 			LOGICAL_ADDR, &logical_addr_high);
2387 
2388 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2389 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2390 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2391 }
2392 
/* Temporarily read the settings from registers; in the future the values will come from the KMD directly. */
2394 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2395 		struct vm_context0_param *vm0,
2396 		struct dce_hwseq *hws)
2397 {
2398 	PHYSICAL_ADDRESS_LOC fb_base;
2399 	PHYSICAL_ADDRESS_LOC fb_offset;
2400 	uint32_t fb_base_value;
2401 	uint32_t fb_offset_value;
2402 
2403 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2404 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2405 
2406 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2407 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2408 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2409 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2410 
2411 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2412 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2413 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2414 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2415 
2416 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2417 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2418 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2419 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2420 
2421 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2422 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2423 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2424 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2425 
2426 	/*
2427 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2428 	 * Therefore we need to do
2429 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2430 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2431 	 */
2432 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2433 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2434 	vm0->pte_base.quad_part += fb_base.quad_part;
2435 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2436 }
2437 
2438 
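/*
 * Mirror the GPU VM setup owned by the MMHUB (system aperture and VM
 * context 0 page table) into the HUBP so that DCN address translation
 * matches the rest of the GPU.
 */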
2439 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2440 {
2441 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2442 	struct vm_system_aperture_param apt = {0};
2443 	struct vm_context0_param vm0 = {0};
2444 
2445 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2446 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2447 
2448 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2449 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2450 }
2451 
2452 static void dcn10_enable_plane(
2453 	struct dc *dc,
2454 	struct pipe_ctx *pipe_ctx,
2455 	struct dc_state *context)
2456 {
2457 	struct dce_hwseq *hws = dc->hwseq;
2458 
2459 	if (dc->debug.sanity_checks) {
2460 		hws->funcs.verify_allow_pstate_change_high(dc);
2461 	}
2462 
2463 	undo_DEGVIDCN10_253_wa(dc);
2464 
2465 	power_on_plane(dc->hwseq,
2466 		pipe_ctx->plane_res.hubp->inst);
2467 
2468 	/* enable DCFCLK current DCHUB */
2469 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2470 
2471 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2472 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2473 			pipe_ctx->stream_res.opp,
2474 			true);
2475 
2476 	if (dc->config.gpu_vm_support)
2477 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2478 
2479 	if (dc->debug.sanity_checks) {
2480 		hws->funcs.verify_allow_pstate_change_high(dc);
2481 	}
2482 
2483 	if (!pipe_ctx->top_pipe
2484 		&& pipe_ctx->plane_state
2485 		&& pipe_ctx->plane_state->flip_int_enabled
2486 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2487 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2488 
2489 }
2490 
2491 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2492 {
2493 	int i = 0;
2494 	struct dpp_grph_csc_adjustment adjust;
2495 	memset(&adjust, 0, sizeof(adjust));
2496 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2497 
2498 
2499 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2500 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2501 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2502 			adjust.temperature_matrix[i] =
2503 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2504 	} else if (pipe_ctx->plane_state &&
2505 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2506 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2507 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2508 			adjust.temperature_matrix[i] =
2509 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2510 	}
2511 
2512 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2513 }
2514 
2515 
2516 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2517 {
2518 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2519 		if (pipe_ctx->top_pipe) {
2520 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2521 
2522 			while (top->top_pipe)
2523 				top = top->top_pipe; // Traverse to top pipe_ctx
2524 			if (top->plane_state && top->plane_state->layer_index == 0)
2525 				return true; // Front MPO plane not hidden
2526 		}
2527 	}
2528 	return false;
2529 }
2530 
2531 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2532 {
2533 	// Override rear plane RGB bias to fix MPO brightness
2534 	uint16_t rgb_bias = matrix[3];
2535 
2536 	matrix[3] = 0;
2537 	matrix[7] = 0;
2538 	matrix[11] = 0;
2539 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2540 	matrix[3] = rgb_bias;
2541 	matrix[7] = rgb_bias;
2542 	matrix[11] = rgb_bias;
2543 }
2544 
2545 void dcn10_program_output_csc(struct dc *dc,
2546 		struct pipe_ctx *pipe_ctx,
2547 		enum dc_color_space colorspace,
2548 		uint16_t *matrix,
2549 		int opp_id)
2550 {
2551 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2552 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2553 
			/* MPO is broken with RGB color spaces when the OCSC matrix
			 * brightness offset is >= 0 on DCN1, because the OCSC sits
			 * before the MPC: blending adds the offsets from both the
			 * front and rear planes to the rear plane.
			 *
			 * The fix is to set the RGB bias to 0 on the rear plane; the
			 * top plane's black-value pixels then add the offset instead
			 * of rear + front.
			 */
2561 
2562 			int16_t rgb_bias = matrix[3];
2563 			// matrix[3/7/11] are all the same offset value
2564 
2565 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2566 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2567 			} else {
2568 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2569 			}
2570 		}
2571 	} else {
2572 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2573 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2574 	}
2575 }
2576 
2577 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2578 {
2579 	struct dc_bias_and_scale bns_params = {0};
2580 
2581 	// program the input csc
2582 	dpp->funcs->dpp_setup(dpp,
2583 			plane_state->format,
2584 			EXPANSION_MODE_ZERO,
2585 			plane_state->input_csc_color_matrix,
2586 			plane_state->color_space,
2587 			NULL);
2588 
2589 	//set scale and bias registers
2590 	build_prescale_params(&bns_params, plane_state);
2591 	if (dpp->funcs->dpp_program_bias_and_scale)
2592 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2593 }
2594 
2595 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2596 {
2597 	struct mpc *mpc = dc->res_pool->mpc;
2598 
2599 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2600 		get_hdr_visual_confirm_color(pipe_ctx, color);
2601 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2602 		get_surface_visual_confirm_color(pipe_ctx, color);
2603 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2604 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2605 	else
2606 		color_space_to_black_color(
2607 				dc, pipe_ctx->stream->output_color_space, color);
2608 
2609 	if (mpc->funcs->set_bg_color) {
2610 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2611 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2612 	}
2613 }
2614 
2615 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2616 {
2617 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2618 	struct mpcc_blnd_cfg blnd_cfg = {0};
2619 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2620 	int mpcc_id;
2621 	struct mpcc *new_mpcc;
2622 	struct mpc *mpc = dc->res_pool->mpc;
2623 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2624 
2625 	blnd_cfg.overlap_only = false;
2626 	blnd_cfg.global_gain = 0xff;
2627 
2628 	if (per_pixel_alpha) {
2629 		/* DCN1.0 has output CM before MPC which seems to screw with
2630 		 * pre-multiplied alpha.
2631 		 */
2632 		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2633 				pipe_ctx->stream->output_color_space)
2634 						&& pipe_ctx->plane_state->pre_multiplied_alpha);
2635 		if (pipe_ctx->plane_state->global_alpha) {
2636 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2637 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2638 		} else {
2639 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2640 		}
2641 	} else {
2642 		blnd_cfg.pre_multiplied_alpha = false;
2643 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2644 	}
2645 
2646 	if (pipe_ctx->plane_state->global_alpha)
2647 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2648 	else
2649 		blnd_cfg.global_alpha = 0xff;
2650 
	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This causes us to pick mpcc1 here,
	 * which causes a pstate hang for a yet unknown reason.
	 */
2659 	mpcc_id = hubp->inst;
2660 
2661 	/* If there is no full update, don't need to touch MPC tree*/
2662 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2663 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2664 		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2665 		return;
2666 	}
2667 
2668 	/* check if this MPCC is already being used */
2669 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2670 	/* remove MPCC if being used */
2671 	if (new_mpcc != NULL)
2672 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2673 	else
2674 		if (dc->debug.sanity_checks)
2675 			mpc->funcs->assert_mpcc_idle_before_connect(
2676 					dc->res_pool->mpc, mpcc_id);
2677 
2678 	/* Call MPC to insert new plane */
2679 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2680 			mpc_tree_params,
2681 			&blnd_cfg,
2682 			NULL,
2683 			NULL,
2684 			hubp->inst,
2685 			mpcc_id);
2686 	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2687 
2688 	ASSERT(new_mpcc != NULL);
2689 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2690 	hubp->mpcc_id = mpcc_id;
2691 }
2692 
2693 static void update_scaler(struct pipe_ctx *pipe_ctx)
2694 {
2695 	bool per_pixel_alpha =
2696 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2697 
2698 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2699 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2700 	/* scaler configuration */
2701 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2702 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2703 }
2704 
2705 static void dcn10_update_dchubp_dpp(
2706 	struct dc *dc,
2707 	struct pipe_ctx *pipe_ctx,
2708 	struct dc_state *context)
2709 {
2710 	struct dce_hwseq *hws = dc->hwseq;
2711 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2712 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2713 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2714 	struct plane_size size = plane_state->plane_size;
2715 	unsigned int compat_level = 0;
2716 	bool should_divided_by_2 = false;
2717 
	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
	/* If the requested max DPP clock is lower than the current dispclk,
	 * there is no need to divide it by 2.
	 */
2722 	if (plane_state->update_flags.bits.full_update) {
2723 
		/* The newly calculated dispclk and dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz, while the
		 * currently programmed dispclk and dppclk come from
		 * dc->clk_mgr->clks. dcn10_validate_bandwidth computes the new
		 * dispclk and dppclk. The new dispclk is put into use after
		 * optimize_bandwidth, when ramp_up_dispclk_with_dpp is called.
		 * dppclk is put into use in two places: one is the same place as
		 * dispclk, the other is within update_dchubp_dpp, which happens
		 * between prepare_bandwidth and optimize_bandwidth.
		 * Updating dppclk within update_dchubp_dpp means the new dispclk
		 * and dppclk values are not in use at the same time. When the
		 * clocks are being decreased, this may leave dppclk lower than
		 * the previous configuration and cause the pipe to get stuck.
		 * Example: eDP + external DP, changing the DP resolution from
		 * 1920x1080@144Hz to 1280x960@60Hz.
		 * Before the change: dispclk = 337889, dppclk = 337889.
		 * On the mode change, dcn10_validate_bandwidth calculates
		 * dispclk = 143122, dppclk = 143122.
		 * update_dchubp_dpp executes before dispclk is updated, so
		 * dispclk = 337889 while dppclk uses the new value
		 * dispclk / 2 = 168944, causing a pipe pstate warning.
		 * Solution: between prepare_bandwidth and optimize_bandwidth,
		 * while dispclk is going to be decreased, keep dppclk = dispclk.
		 */
2749 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2750 				dc->clk_mgr->clks.dispclk_khz)
2751 			should_divided_by_2 = false;
2752 		else
2753 			should_divided_by_2 =
2754 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2755 					dc->clk_mgr->clks.dispclk_khz / 2;
2756 
2757 		dpp->funcs->dpp_dppclk_control(
2758 				dpp,
2759 				should_divided_by_2,
2760 				true);
2761 
2762 		if (dc->res_pool->dccg)
2763 			dc->res_pool->dccg->funcs->update_dpp_dto(
2764 					dc->res_pool->dccg,
2765 					dpp->inst,
2766 					pipe_ctx->plane_res.bw.dppclk_khz);
2767 		else
2768 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2769 						dc->clk_mgr->clks.dispclk_khz / 2 :
2770 							dc->clk_mgr->clks.dispclk_khz;
2771 	}
2772 
	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe ties to.
	 * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
	 * VTG has a 1:1 mapping with OTG. Each pipe's HUBP selects which VTG to use.
	 */
2777 	if (plane_state->update_flags.bits.full_update) {
2778 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2779 
2780 		hubp->funcs->hubp_setup(
2781 			hubp,
2782 			&pipe_ctx->dlg_regs,
2783 			&pipe_ctx->ttu_regs,
2784 			&pipe_ctx->rq_regs,
2785 			&pipe_ctx->pipe_dlg_param);
2786 		hubp->funcs->hubp_setup_interdependent(
2787 			hubp,
2788 			&pipe_ctx->dlg_regs,
2789 			&pipe_ctx->ttu_regs);
2790 	}
2791 
2792 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2793 
2794 	if (plane_state->update_flags.bits.full_update ||
2795 		plane_state->update_flags.bits.bpp_change)
2796 		dcn10_update_dpp(dpp, plane_state);
2797 
2798 	if (plane_state->update_flags.bits.full_update ||
2799 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2800 		plane_state->update_flags.bits.global_alpha_change)
2801 		hws->funcs.update_mpcc(dc, pipe_ctx);
2802 
2803 	if (plane_state->update_flags.bits.full_update ||
2804 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2805 		plane_state->update_flags.bits.global_alpha_change ||
2806 		plane_state->update_flags.bits.scaling_change ||
2807 		plane_state->update_flags.bits.position_change) {
2808 		update_scaler(pipe_ctx);
2809 	}
2810 
2811 	if (plane_state->update_flags.bits.full_update ||
2812 		plane_state->update_flags.bits.scaling_change ||
2813 		plane_state->update_flags.bits.position_change) {
2814 		hubp->funcs->mem_program_viewport(
2815 			hubp,
2816 			&pipe_ctx->plane_res.scl_data.viewport,
2817 			&pipe_ctx->plane_res.scl_data.viewport_c);
2818 	}
2819 
2820 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2821 		dc->hwss.set_cursor_position(pipe_ctx);
2822 		dc->hwss.set_cursor_attribute(pipe_ctx);
2823 
2824 		if (dc->hwss.set_cursor_sdr_white_level)
2825 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2826 	}
2827 
2828 	if (plane_state->update_flags.bits.full_update) {
2829 		/*gamut remap*/
2830 		dc->hwss.program_gamut_remap(pipe_ctx);
2831 
2832 		dc->hwss.program_output_csc(dc,
2833 				pipe_ctx,
2834 				pipe_ctx->stream->output_color_space,
2835 				pipe_ctx->stream->csc_color_matrix.matrix,
2836 				pipe_ctx->stream_res.opp->inst);
2837 	}
2838 
2839 	if (plane_state->update_flags.bits.full_update ||
2840 		plane_state->update_flags.bits.pixel_format_change ||
2841 		plane_state->update_flags.bits.horizontal_mirror_change ||
2842 		plane_state->update_flags.bits.rotation_change ||
2843 		plane_state->update_flags.bits.swizzle_change ||
2844 		plane_state->update_flags.bits.dcc_change ||
2845 		plane_state->update_flags.bits.bpp_change ||
2846 		plane_state->update_flags.bits.scaling_change ||
2847 		plane_state->update_flags.bits.plane_size_change) {
2848 		hubp->funcs->hubp_program_surface_config(
2849 			hubp,
2850 			plane_state->format,
2851 			&plane_state->tiling_info,
2852 			&size,
2853 			plane_state->rotation,
2854 			&plane_state->dcc,
2855 			plane_state->horizontal_mirror,
2856 			compat_level);
2857 	}
2858 
2859 	hubp->power_gated = false;
2860 
2861 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2862 
2863 	if (is_pipe_tree_visible(pipe_ctx))
2864 		hubp->funcs->set_blank(hubp, false);
2865 }
2866 
2867 void dcn10_blank_pixel_data(
2868 		struct dc *dc,
2869 		struct pipe_ctx *pipe_ctx,
2870 		bool blank)
2871 {
2872 	enum dc_color_space color_space;
2873 	struct tg_color black_color = {0};
2874 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2875 	struct dc_stream_state *stream = pipe_ctx->stream;
2876 
2877 	/* program otg blank color */
2878 	color_space = stream->output_color_space;
2879 	color_space_to_black_color(dc, color_space, &black_color);
2880 
	/*
	 * The way 4:2:0 is packed, 2 channels carry the Y component and 1
	 * channel alternates between Cb and Cr, so both channels need the
	 * pixel value for Y.
	 */
2886 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2887 		black_color.color_r_cr = black_color.color_g_y;
2888 
2889 
2890 	if (stream_res->tg->funcs->set_blank_color)
2891 		stream_res->tg->funcs->set_blank_color(
2892 				stream_res->tg,
2893 				&black_color);
2894 
2895 	if (!blank) {
2896 		if (stream_res->tg->funcs->set_blank)
2897 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2898 		if (stream_res->abm) {
2899 			dc->hwss.set_pipe(pipe_ctx);
2900 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2901 		}
2902 	} else {
2903 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2904 		if (stream_res->tg->funcs->set_blank) {
2905 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2906 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2907 		}
2908 	}
2909 }
2910 
2911 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2912 {
2913 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2914 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
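	/* With 6 exponent bits and 12 mantissa bits (see fmt below), 0x1f000
	 * appears to encode an exponent field at the bias and a zero mantissa,
	 * i.e. a multiplier of 1.0.
	 */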
2915 	struct custom_float_format fmt;
2916 
2917 	fmt.exponenta_bits = 6;
2918 	fmt.mantissa_bits = 12;
2919 	fmt.sign = true;
2920 
2921 
2922 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2923 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2924 
2925 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2926 			pipe_ctx->plane_res.dpp, hw_mult);
2927 }
2928 
2929 void dcn10_program_pipe(
2930 		struct dc *dc,
2931 		struct pipe_ctx *pipe_ctx,
2932 		struct dc_state *context)
2933 {
2934 	struct dce_hwseq *hws = dc->hwseq;
2935 
2936 	if (pipe_ctx->top_pipe == NULL) {
2937 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2938 
2939 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2940 				pipe_ctx->stream_res.tg,
2941 				calculate_vready_offset_for_group(pipe_ctx),
2942 				pipe_ctx->pipe_dlg_param.vstartup_start,
2943 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2944 				pipe_ctx->pipe_dlg_param.vupdate_width);
2945 
2946 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2947 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2948 
2949 		if (hws->funcs.setup_vupdate_interrupt)
2950 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2951 
2952 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2953 	}
2954 
2955 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2956 		dcn10_enable_plane(dc, pipe_ctx, context);
2957 
2958 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2959 
2960 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2961 
2962 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2963 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2964 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2965 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2966 
	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
	 * so only do gamma programming on a full update.
	 * TODO: This can be further optimized/cleaned up.
	 * Always calling this is acceptable for now since it does a memcmp
	 * before doing the heavy calculation and programming.
	 */
2973 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2974 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2975 }
2976 
2977 void dcn10_wait_for_pending_cleared(struct dc *dc,
2978 		struct dc_state *context)
2979 {
2980 		struct pipe_ctx *pipe_ctx;
2981 		struct timing_generator *tg;
2982 		int i;
2983 
2984 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2985 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2986 			tg = pipe_ctx->stream_res.tg;
2987 
			/*
			 * Only wait for the top pipe's TG pending bit.
			 * Also skip if the pipe is disabled.
			 */
2992 			if (pipe_ctx->top_pipe ||
2993 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2994 			    !tg->funcs->is_tg_enabled(tg))
2995 				continue;
2996 
2997 			/*
2998 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2999 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
3000 			 * seems to not trigger the update right away, and if we
3001 			 * lock again before VUPDATE then we don't get a separated
3002 			 * operation.
3003 			 */
3004 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
3005 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
3006 		}
3007 }
3008 
3009 void dcn10_post_unlock_program_front_end(
3010 		struct dc *dc,
3011 		struct dc_state *context)
3012 {
3013 	int i;
3014 
3015 	DC_LOGGER_INIT(dc->ctx->logger);
3016 
3017 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3018 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3019 
3020 		if (!pipe_ctx->top_pipe &&
3021 			!pipe_ctx->prev_odm_pipe &&
3022 			pipe_ctx->stream) {
3023 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3024 
3025 			if (context->stream_status[i].plane_count == 0)
3026 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3027 		}
3028 	}
3029 
3030 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3031 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3032 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3033 
3034 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3035 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3036 			dc->hwss.optimize_bandwidth(dc, context);
3037 			break;
3038 		}
3039 
3040 	if (dc->hwseq->wa.DEGVIDCN10_254)
3041 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3042 }
3043 
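/* Workaround: disable self-refresh (stutter) whenever any stream uses HW frame-packed stereo. */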
3044 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3045 {
3046 	uint8_t i;
3047 
3048 	for (i = 0; i < context->stream_count; i++) {
3049 		if (context->streams[i]->timing.timing_3d_format
3050 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3051 			/*
3052 			 * Disable stutter
3053 			 */
3054 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3055 			break;
3056 		}
3057 	}
3058 }
3059 
3060 void dcn10_prepare_bandwidth(
3061 		struct dc *dc,
3062 		struct dc_state *context)
3063 {
3064 	struct dce_hwseq *hws = dc->hwseq;
3065 	struct hubbub *hubbub = dc->res_pool->hubbub;
3066 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3067 
3068 	if (dc->debug.sanity_checks)
3069 		hws->funcs.verify_allow_pstate_change_high(dc);
3070 
3071 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3072 		if (context->stream_count == 0)
3073 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3074 
3075 		dc->clk_mgr->funcs->update_clocks(
3076 				dc->clk_mgr,
3077 				context,
3078 				false);
3079 	}
3080 
3081 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3082 			&context->bw_ctx.bw.dcn.watermarks,
3083 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3084 			true);
3085 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3086 
3087 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3088 		DC_FP_START();
3089 		dcn_get_soc_clks(
3090 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3091 		DC_FP_END();
3092 		dcn_bw_notify_pplib_of_wm_ranges(
3093 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3094 	}
3095 
3096 	if (dc->debug.sanity_checks)
3097 		hws->funcs.verify_allow_pstate_change_high(dc);
3098 }
3099 
3100 void dcn10_optimize_bandwidth(
3101 		struct dc *dc,
3102 		struct dc_state *context)
3103 {
3104 	struct dce_hwseq *hws = dc->hwseq;
3105 	struct hubbub *hubbub = dc->res_pool->hubbub;
3106 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3107 
3108 	if (dc->debug.sanity_checks)
3109 		hws->funcs.verify_allow_pstate_change_high(dc);
3110 
3111 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3112 		if (context->stream_count == 0)
3113 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3114 
3115 		dc->clk_mgr->funcs->update_clocks(
3116 				dc->clk_mgr,
3117 				context,
3118 				true);
3119 	}
3120 
3121 	hubbub->funcs->program_watermarks(hubbub,
3122 			&context->bw_ctx.bw.dcn.watermarks,
3123 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3124 			true);
3125 
3126 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3127 
3128 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3129 		DC_FP_START();
3130 		dcn_get_soc_clks(
3131 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3132 		DC_FP_END();
3133 		dcn_bw_notify_pplib_of_wm_ranges(
3134 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3135 	}
3136 
3137 	if (dc->debug.sanity_checks)
3138 		hws->funcs.verify_allow_pstate_change_high(dc);
3139 }
3140 
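/*
 * Program DRR (dynamic refresh rate) by writing the V_TOTAL min/mid/max
 * adjustment to each pipe's OTG; while a DRR range is active, also arm the
 * static screen control with the OTG_TRIG_A event used for manual control.
 */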
3141 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3142 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3143 {
3144 	int i = 0;
3145 	struct drr_params params = {0};
3146 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3147 	unsigned int event_triggers = 0x800;
	// Note: DRR trigger events are generated whether or not the num_frames count is met.
3149 	unsigned int num_frames = 2;
3150 
3151 	params.vertical_total_max = adjust.v_total_max;
3152 	params.vertical_total_min = adjust.v_total_min;
3153 	params.vertical_total_mid = adjust.v_total_mid;
3154 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, GSL synchronization is
	 * required. Static screen triggers may also need to be programmed
	 * differently.
	 */
3159 	for (i = 0; i < num_pipes; i++) {
3160 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3161 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3162 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3163 					pipe_ctx[i]->stream_res.tg, &params);
3164 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3165 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3166 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3167 						pipe_ctx[i]->stream_res.tg,
3168 						event_triggers, num_frames);
3169 		}
3170 	}
3171 }
3172 
3173 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3174 		int num_pipes,
3175 		struct crtc_position *position)
3176 {
3177 	int i = 0;
3178 
	/* TODO: handle pipes > 1 */
3181 	for (i = 0; i < num_pipes; i++)
3182 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3183 }
3184 
3185 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3186 		int num_pipes, const struct dc_static_screen_params *params)
3187 {
3188 	unsigned int i;
3189 	unsigned int triggers = 0;
3190 
3191 	if (params->triggers.surface_update)
3192 		triggers |= 0x80;
3193 	if (params->triggers.cursor_update)
3194 		triggers |= 0x2;
3195 	if (params->triggers.force_trigger)
3196 		triggers |= 0x1;
3197 
3198 	for (i = 0; i < num_pipes; i++)
3199 		pipe_ctx[i]->stream_res.tg->funcs->
3200 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3201 					triggers, params->num_frames);
3202 }
3203 
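/*
 * Derive the CRTC stereo flags from the stream's 3D timing and view format:
 * stereo is only programmed for frame-sequential output on a stereo timing,
 * DP stereo sync is disabled when an active DP-to-VGA/DVI/HDMI converter
 * dongle is detected, and HW frame packing is flagged separately.
 */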
3204 static void dcn10_config_stereo_parameters(
3205 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3206 {
3207 	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =
			stream->timing.timing_3d_format;
3210 	bool non_stereo_timing = false;
3211 
3212 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3213 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3214 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3215 		non_stereo_timing = true;
3216 
3217 	if (non_stereo_timing == false &&
3218 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3219 
3220 		flags->PROGRAM_STEREO         = 1;
3221 		flags->PROGRAM_POLARITY       = 1;
3222 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3223 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3224 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3225 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3226 
3227 			if (stream->link && stream->link->ddc) {
				enum display_dongle_type dongle =
						stream->link->ddc->dongle_type;
3230 
3231 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3232 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3233 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3234 					flags->DISABLE_STEREO_DP_SYNC = 1;
3235 			}
3236 		}
		flags->RIGHT_EYE_POLARITY =
				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3239 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3240 			flags->FRAME_PACKED = 1;
3241 	}
3244 }
3245 
3246 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3247 {
3248 	struct crtc_stereo_flags flags = { 0 };
3249 	struct dc_stream_state *stream = pipe_ctx->stream;
3250 
3251 	dcn10_config_stereo_parameters(stream, &flags);
3252 
3253 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3254 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3255 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3256 	} else {
3257 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3258 	}
3259 
3260 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3261 		pipe_ctx->stream_res.opp,
3262 		flags.PROGRAM_STEREO == 1,
3263 		&stream->timing);
3264 
3265 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3266 		pipe_ctx->stream_res.tg,
3267 		&stream->timing,
3268 		&flags);
3271 }
3272 
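/* Find the HUBP whose instance number matches the given MPCC instance. */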
3273 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3274 {
3275 	int i;
3276 
3277 	for (i = 0; i < res_pool->pipe_count; i++) {
3278 		if (res_pool->hubps[i]->inst == mpcc_inst)
3279 			return res_pool->hubps[i];
3280 	}
3281 	ASSERT(false);
3282 	return NULL;
3283 }
3284 
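/*
 * For every MPCC instance with a disconnect pending on this pipe's OPP, wait
 * for the MPC to go idle (only while the OTG is still running), clear the
 * pending flag and blank the HUBP that was feeding it.
 */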
3285 void dcn10_wait_for_mpcc_disconnect(
3286 		struct dc *dc,
3287 		struct resource_pool *res_pool,
3288 		struct pipe_ctx *pipe_ctx)
3289 {
3290 	struct dce_hwseq *hws = dc->hwseq;
3291 	int mpcc_inst;
3292 
3293 	if (dc->debug.sanity_checks) {
3294 		hws->funcs.verify_allow_pstate_change_high(dc);
3295 	}
3296 
3297 	if (!pipe_ctx->stream_res.opp)
3298 		return;
3299 
3300 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3301 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3302 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3303 
3304 			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3305 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3306 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3307 			hubp->funcs->set_blank(hubp, true);
3308 		}
3309 	}
3310 
3311 	if (dc->debug.sanity_checks) {
3312 		hws->funcs.verify_allow_pstate_change_high(dc);
3313 	}
}
3316 
3317 bool dcn10_dummy_display_power_gating(
3318 	struct dc *dc,
3319 	uint8_t controller_id,
3320 	struct dc_bios *dcb,
3321 	enum pipe_gating_control power_gating)
3322 {
3323 	return true;
3324 }
3325 
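/*
 * Refresh the plane's flip status from the HUBP: latch the new surface
 * address once the flip has completed, record which stereo eye is being
 * scanned out, and release the multi-plane-transition self-refresh
 * workaround once a new frame has started.
 */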
3326 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3327 {
3328 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3329 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3330 	bool flip_pending;
3331 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3332 
3333 	if (plane_state == NULL)
3334 		return;
3335 
3336 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3337 					pipe_ctx->plane_res.hubp);
3338 
3339 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3340 
3341 	if (!flip_pending)
3342 		plane_state->status.current_address = plane_state->status.requested_address;
3343 
3344 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3345 			tg->funcs->is_stereo_left_eye) {
3346 		plane_state->status.is_right_eye =
3347 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3348 	}
3349 
3350 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3351 		struct dce_hwseq *hwseq = dc->hwseq;
3352 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3353 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3354 
3355 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3356 			struct hubbub *hubbub = dc->res_pool->hubbub;
3357 
3358 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3359 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3360 		}
3361 	}
3362 }
3363 
3364 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3365 {
3366 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3367 
3368 	/* In DCN, this programming sequence is owned by the hubbub */
3369 	hubbub->funcs->update_dchub(hubbub, dh_data);
3370 }
3371 
3372 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3373 {
3374 	struct pipe_ctx *test_pipe, *split_pipe;
3375 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3376 	struct rect r1 = scl_data->recout, r2, r2_half;
3377 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3378 	int cur_layer = pipe_ctx->plane_state->layer_index;
3379 
3380 	/**
3381 	 * Disable the cursor if there's another pipe above this with a
3382 	 * plane that contains this pipe's viewport to prevent double cursor
3383 	 * and incorrect scaling artifacts.
3384 	 */
3385 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3386 	     test_pipe = test_pipe->top_pipe) {
		// Skip pipes without a plane state, invisible layers and pipe-split planes on the same layer
		if (!test_pipe->plane_state ||
		    !test_pipe->plane_state->visible ||
		    test_pipe->plane_state->layer_index == cur_layer)
			continue;
3390 
3391 		r2 = test_pipe->plane_res.scl_data.recout;
3392 		r2_r = r2.x + r2.width;
3393 		r2_b = r2.y + r2.height;
3394 		split_pipe = test_pipe;
3395 
		/**
		 * If pipe-split left the other half of this plane on the same
		 * layer, merge the two recouts; they share the same height.
		 */
3400 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3401 		     split_pipe = split_pipe->top_pipe)
3402 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3403 				r2_half = split_pipe->plane_res.scl_data.recout;
3404 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3405 				r2.width = r2.width + r2_half.width;
3406 				r2_r = r2.x + r2.width;
3407 				break;
3408 			}
3409 
3410 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3411 			return true;
3412 	}
3413 
3414 	return false;
3415 }
3416 
3417 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3418 {
3419 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3420 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3421 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3422 	struct dc_cursor_mi_param param = {
3423 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3424 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3425 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3426 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3427 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3428 		.rotation = pipe_ctx->plane_state->rotation,
3429 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3430 	};
3431 	bool pipe_split_on = false;
3432 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3433 		(pipe_ctx->prev_odm_pipe != NULL);
3434 
3435 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3436 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3437 	int x_pos = pos_cpy.x;
3438 	int y_pos = pos_cpy.y;
3439 
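	/*
	 * With a top or bottom pipe attached, treat this as a pipe-split case
	 * when the plane's source rect no longer matches this pipe's viewport,
	 * i.e. the surface is divided between two pipes.
	 */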
3440 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3441 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3442 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3443 			pipe_split_on = true;
3444 		}
3445 	}
3446 
	/**
	 * The DC cursor is in stream space; the HW cursor is in plane space
	 * and is drawn as part of the framebuffer.
	 *
	 * The cursor position can't be negative, but the hotspot can be used
	 * to shift the cursor out of the plane bounds. The hotspot must be
	 * smaller than the cursor size.
	 */
3455 
3456 	/**
3457 	 * Translate cursor from stream space to plane space.
3458 	 *
3459 	 * If the cursor is scaled then we need to scale the position
3460 	 * to be in the approximately correct place. We can't do anything
3461 	 * about the actual size being incorrect, that's a limitation of
3462 	 * the hardware.
3463 	 */
3464 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3465 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3466 				pipe_ctx->plane_state->dst_rect.width;
3467 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3468 				pipe_ctx->plane_state->dst_rect.height;
3469 	} else {
3470 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3471 				pipe_ctx->plane_state->dst_rect.width;
3472 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3473 				pipe_ctx->plane_state->dst_rect.height;
3474 	}
3475 
3476 	/**
3477 	 * If the cursor's source viewport is clipped then we need to
3478 	 * translate the cursor to appear in the correct position on
3479 	 * the screen.
3480 	 *
3481 	 * This translation isn't affected by scaling so it needs to be
3482 	 * done *after* we adjust the position for the scale factor.
3483 	 *
3484 	 * This is only done by opt-in for now since there are still
3485 	 * some usecases like tiled display that might enable the
3486 	 * cursor on both streams while expecting dc to clip it.
3487 	 */
3488 	if (pos_cpy.translate_by_source) {
3489 		x_pos += pipe_ctx->plane_state->src_rect.x;
3490 		y_pos += pipe_ctx->plane_state->src_rect.y;
3491 	}
3492 
3493 	/**
3494 	 * If the position is negative then we need to add to the hotspot
3495 	 * to shift the cursor outside the plane.
3496 	 */
3497 
3498 	if (x_pos < 0) {
3499 		pos_cpy.x_hotspot -= x_pos;
3500 		x_pos = 0;
3501 	}
3502 
3503 	if (y_pos < 0) {
3504 		pos_cpy.y_hotspot -= y_pos;
3505 		y_pos = 0;
3506 	}
3507 
3508 	pos_cpy.x = (uint32_t)x_pos;
3509 	pos_cpy.y = (uint32_t)y_pos;
3510 
3511 	if (pipe_ctx->plane_state->address.type
3512 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3513 		pos_cpy.enable = false;
3514 
3515 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3516 		pos_cpy.enable = false;
3517 
3518 
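	/*
	 * Adjust the cursor position for the plane's rotation and horizontal
	 * mirror so the HW cursor lands at the requested on-screen location.
	 */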
3519 	if (param.rotation == ROTATION_ANGLE_0) {
3520 		int viewport_width =
3521 			pipe_ctx->plane_res.scl_data.viewport.width;
3522 		int viewport_x =
3523 			pipe_ctx->plane_res.scl_data.viewport.x;
3524 
3525 		if (param.mirror) {
3526 			if (pipe_split_on || odm_combine_on) {
3527 				if (pos_cpy.x >= viewport_width + viewport_x) {
3528 					pos_cpy.x = 2 * viewport_width
3529 							- pos_cpy.x + 2 * viewport_x;
3530 				} else {
3531 					uint32_t temp_x = pos_cpy.x;
3532 
3533 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3534 					if (temp_x >= viewport_x +
3535 						(int)hubp->curs_attr.width || pos_cpy.x
3536 						<= (int)hubp->curs_attr.width +
3537 						pipe_ctx->plane_state->src_rect.x) {
3538 						pos_cpy.x = temp_x + viewport_width;
3539 					}
3540 				}
3541 			} else {
3542 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3543 			}
3544 		}
3545 	}
3546 	// Swap axis and mirror horizontally
3547 	else if (param.rotation == ROTATION_ANGLE_90) {
3548 		uint32_t temp_x = pos_cpy.x;
3549 
3550 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3551 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3552 		pos_cpy.y = temp_x;
3553 	}
3554 	// Swap axis and mirror vertically
3555 	else if (param.rotation == ROTATION_ANGLE_270) {
3556 		uint32_t temp_y = pos_cpy.y;
3557 		int viewport_height =
3558 			pipe_ctx->plane_res.scl_data.viewport.height;
3559 		int viewport_y =
3560 			pipe_ctx->plane_res.scl_data.viewport.y;
3561 
3562 		/**
		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height.
3564 		 * For pipe split cases:
3565 		 * - apply offset of viewport.y to normalize pos_cpy.x
3566 		 * - calculate the pos_cpy.y as before
3567 		 * - shift pos_cpy.y back by same offset to get final value
3568 		 * - since we iterate through both pipes, use the lower
3569 		 *   viewport.y for offset
3570 		 * For non pipe split cases, use the same calculation for
3571 		 *  pos_cpy.y as the 180 degree rotation case below,
3572 		 *  but use pos_cpy.x as our input because we are rotating
3573 		 *  270 degrees
3574 		 */
3575 		if (pipe_split_on || odm_combine_on) {
3576 			int pos_cpy_x_offset;
3577 			int other_pipe_viewport_y;
3578 
3579 			if (pipe_split_on) {
3580 				if (pipe_ctx->bottom_pipe) {
3581 					other_pipe_viewport_y =
3582 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3583 				} else {
3584 					other_pipe_viewport_y =
3585 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3586 				}
3587 			} else {
3588 				if (pipe_ctx->next_odm_pipe) {
3589 					other_pipe_viewport_y =
3590 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3591 				} else {
3592 					other_pipe_viewport_y =
3593 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3594 				}
3595 			}
3596 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3597 				other_pipe_viewport_y : viewport_y;
3598 			pos_cpy.x -= pos_cpy_x_offset;
3599 			if (pos_cpy.x > viewport_height) {
3600 				pos_cpy.x = pos_cpy.x - viewport_height;
3601 				pos_cpy.y = viewport_height - pos_cpy.x;
3602 			} else {
3603 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3604 			}
3605 			pos_cpy.y += pos_cpy_x_offset;
3606 		} else {
3607 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3608 		}
3609 		pos_cpy.x = temp_y;
3610 	}
3611 	// Mirror horizontally and vertically
3612 	else if (param.rotation == ROTATION_ANGLE_180) {
3613 		int viewport_width =
3614 			pipe_ctx->plane_res.scl_data.viewport.width;
3615 		int viewport_x =
3616 			pipe_ctx->plane_res.scl_data.viewport.x;
3617 
3618 		if (!param.mirror) {
3619 			if (pipe_split_on || odm_combine_on) {
3620 				if (pos_cpy.x >= viewport_width + viewport_x) {
3621 					pos_cpy.x = 2 * viewport_width
3622 							- pos_cpy.x + 2 * viewport_x;
3623 				} else {
3624 					uint32_t temp_x = pos_cpy.x;
3625 
3626 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3627 					if (temp_x >= viewport_x +
3628 						(int)hubp->curs_attr.width || pos_cpy.x
3629 						<= (int)hubp->curs_attr.width +
3630 						pipe_ctx->plane_state->src_rect.x) {
3631 						pos_cpy.x = 2 * viewport_width - temp_x;
3632 					}
3633 				}
3634 			} else {
3635 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3636 			}
3637 		}
3638 
3639 		/**
		 * Display groups that are 1xnY have pos_cpy.y > viewport.height.
3641 		 * Calculation:
3642 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3643 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3644 		 * Simplify it as:
3645 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3646 		 */
3647 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3648 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3649 	}
3650 
3651 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3652 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3653 }
3654 
3655 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3656 {
3657 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3658 
3659 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3660 			pipe_ctx->plane_res.hubp, attributes);
3661 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3662 		pipe_ctx->plane_res.dpp, attributes);
3663 }
3664 
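/*
 * Scale cursor pixels by sdr_white_level / 80 nits so cursor brightness
 * matches SDR content when composed into an HDR stream. The multiplier is
 * converted to the DPP's custom float format (1 sign, 5 exponent,
 * 10 mantissa bits); 0x3c00 is 1.0 in that format.
 */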
3665 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3666 {
3667 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3668 	struct fixed31_32 multiplier;
3669 	struct dpp_cursor_attributes opt_attr = { 0 };
3670 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3671 	struct custom_float_format fmt;
3672 
3673 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3674 		return;
3675 
3676 	fmt.exponenta_bits = 5;
3677 	fmt.mantissa_bits = 10;
3678 	fmt.sign = true;
3679 
3680 	if (sdr_white_level > 80) {
3681 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3682 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3683 	}
3684 
3685 	opt_attr.scale = hw_scale;
3686 	opt_attr.bias = 0;
3687 
3688 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3689 			pipe_ctx->plane_res.dpp, &opt_attr);
3690 }
3691 
/*
 * apply_front_porch_workaround  TODO: is this still needed on FPGA?
 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 for interlaced modes or 1 for
 * progressive.
 */
3698 static void apply_front_porch_workaround(
3699 	struct dc_crtc_timing *timing)
3700 {
3701 	if (timing->flags.INTERLACE == 1) {
3702 		if (timing->v_front_porch < 2)
3703 			timing->v_front_porch = 2;
3704 	} else {
3705 		if (timing->v_front_porch < 1)
3706 			timing->v_front_porch = 1;
3707 	}
3708 }
3709 
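/*
 * Return the VUPDATE position as a line offset from VSYNC, derived from the
 * (front-porch patched) timing and the pipe's VSTARTUP. The result can be
 * negative if VUPDATE lands before VSYNC.
 */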
3710 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3711 {
3712 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3713 	struct dc_crtc_timing patched_crtc_timing;
3714 	int vesa_sync_start;
3715 	int asic_blank_end;
3716 	int interlace_factor;
3717 
3718 	patched_crtc_timing = *dc_crtc_timing;
3719 	apply_front_porch_workaround(&patched_crtc_timing);
3720 
3721 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3722 
3723 	vesa_sync_start = patched_crtc_timing.v_addressable +
3724 			patched_crtc_timing.v_border_bottom +
3725 			patched_crtc_timing.v_front_porch;
3726 
3727 	asic_blank_end = (patched_crtc_timing.v_total -
3728 			vesa_sync_start -
3729 			patched_crtc_timing.v_border_top)
3730 			* interlace_factor;
3731 
3732 	return asic_blank_end -
3733 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3734 }
3735 
3736 void dcn10_calc_vupdate_position(
3737 		struct dc *dc,
3738 		struct pipe_ctx *pipe_ctx,
3739 		uint32_t *start_line,
3740 		uint32_t *end_line)
3741 {
3742 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3743 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3744 
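	/* fold the (possibly negative) vupdate offset back into the current frame */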
3745 	if (vupdate_pos >= 0)
3746 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3747 	else
3748 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3749 	*end_line = (*start_line + 2) % timing->v_total;
3750 }
3751 
3752 static void dcn10_cal_vline_position(
3753 		struct dc *dc,
3754 		struct pipe_ctx *pipe_ctx,
3755 		uint32_t *start_line,
3756 		uint32_t *end_line)
3757 {
3758 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3759 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3760 
3761 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3762 		if (vline_pos > 0)
3763 			vline_pos--;
3764 		else if (vline_pos < 0)
3765 			vline_pos++;
3766 
3767 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3768 		if (vline_pos >= 0)
3769 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3770 		else
3771 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3772 		*end_line = (*start_line + 2) % timing->v_total;
3773 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3774 		// vsync is line 0 so start_line is just the requested line offset
3775 		*start_line = vline_pos;
3776 		*end_line = (*start_line + 2) % timing->v_total;
3777 	} else
3778 		ASSERT(0);
3779 }
3780 
3781 void dcn10_setup_periodic_interrupt(
3782 		struct dc *dc,
3783 		struct pipe_ctx *pipe_ctx)
3784 {
3785 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3786 	uint32_t start_line = 0;
3787 	uint32_t end_line = 0;
3788 
3789 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3790 
3791 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3792 }
3793 
3794 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3795 {
3796 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3797 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3798 
3799 	if (start_line < 0) {
3800 		ASSERT(0);
3801 		start_line = 0;
3802 	}
3803 
3804 	if (tg->funcs->setup_vertical_interrupt2)
3805 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3806 }
3807 
3808 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3809 		struct dc_link_settings *link_settings)
3810 {
3811 	struct encoder_unblank_param params = {0};
3812 	struct dc_stream_state *stream = pipe_ctx->stream;
3813 	struct dc_link *link = stream->link;
3814 	struct dce_hwseq *hws = link->dc->hwseq;
3815 
	/* only the fields set below are used by unblank */
3817 	params.timing = pipe_ctx->stream->timing;
3818 
3819 	params.link_settings.link_rate = link_settings->link_rate;
3820 
3821 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3822 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3823 			params.timing.pix_clk_100hz /= 2;
3824 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3825 	}
3826 
3827 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3828 		hws->funcs.edp_backlight_control(link, true);
3829 	}
3830 }
3831 
3832 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3833 				const uint8_t *custom_sdp_message,
3834 				unsigned int sdp_message_size)
3835 {
3836 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3837 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3838 				pipe_ctx->stream_res.stream_enc,
3839 				custom_sdp_message,
3840 				sdp_message_size);
3841 	}
3842 }
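
/*
 * Validate a requested DISPCLK/DPPCLK value against the limits reported by
 * the clock manager and, if acceptable, apply it through update_clocks().
 */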
3843 enum dc_status dcn10_set_clock(struct dc *dc,
3844 			enum dc_clock_type clock_type,
3845 			uint32_t clk_khz,
3846 			uint32_t stepping)
3847 {
3848 	struct dc_state *context = dc->current_state;
3849 	struct dc_clock_config clock_cfg = {0};
3850 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3851 
3852 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3853 		return DC_FAIL_UNSUPPORTED_1;
3854 
3855 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3856 		context, clock_type, &clock_cfg);
3857 
3858 	if (clk_khz > clock_cfg.max_clock_khz)
3859 		return DC_FAIL_CLK_EXCEED_MAX;
3860 
3861 	if (clk_khz < clock_cfg.min_clock_khz)
3862 		return DC_FAIL_CLK_BELOW_MIN;
3863 
3864 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3865 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3866 
	/* update the internally requested clock so update_clocks() uses it */
3868 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3869 		current_clocks->dispclk_khz = clk_khz;
3870 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3871 		current_clocks->dppclk_khz = clk_khz;
3872 	else
3873 		return DC_ERROR_UNEXPECTED;
3874 
	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3881 
3882 void dcn10_get_clock(struct dc *dc,
3883 			enum dc_clock_type clock_type,
3884 			struct dc_clock_config *clock_cfg)
3885 {
3886 	struct dc_state *context = dc->current_state;
3887 
	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context,
				clock_type, clock_cfg);
}
3892 
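/*
 * Read back each HUBP's hardware state and report, for every non-blanked
 * pipe, whether DCC (Delta Color Compression) is enabled on its surface.
 */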
3893 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3894 {
3895 	struct resource_pool *pool = dc->res_pool;
3896 	int i;
3897 
3898 	for (i = 0; i < pool->pipe_count; i++) {
3899 		struct hubp *hubp = pool->hubps[i];
3900 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3901 
3902 		hubp->funcs->hubp_read_state(hubp);
3903 
3904 		if (!s->blank_en)
3905 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3906 	}
3907 }
3908