1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 #include "dc_trace.h"
56 
57 #define DC_LOGGER_INIT(logger)
58 
59 #define CTX \
60 	hws->ctx
61 #define REG(reg)\
62 	hws->regs->reg
63 
64 #undef FN
65 #define FN(reg_name, field_name) \
66 	hws->shifts->field_name, hws->masks->field_name
67 
68 /*print is 17 wide, first two characters are spaces*/
69 #define DTN_INFO_MICRO_SEC(ref_cycle) \
70 	print_microsec(dc_ctx, log_ctx, ref_cycle)
71 
72 #define GAMMA_HW_POINTS_NUM 256
73 
74 void print_microsec(struct dc_context *dc_ctx,
75 	struct dc_log_buffer_ctx *log_ctx,
76 	uint32_t ref_cycle)
77 {
78 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
79 	static const unsigned int frac = 1000;
80 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
81 
82 	DTN_INFO("  %11d.%03d",
83 			us_x10 / frac,
84 			us_x10 % frac);
85 }
86 
87 void dcn10_lock_all_pipes(struct dc *dc,
88 	struct dc_state *context,
89 	bool lock)
90 {
91 	struct pipe_ctx *pipe_ctx;
92 	struct timing_generator *tg;
93 	int i;
94 
95 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
96 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
97 		tg = pipe_ctx->stream_res.tg;
98 
99 		/*
100 		 * Only lock the top pipe's tg to prevent redundant
101 		 * (un)locking. Also skip if pipe is disabled.
102 		 */
103 		if (pipe_ctx->top_pipe ||
104 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
105 		    !tg->funcs->is_tg_enabled(tg))
106 			continue;
107 
108 		if (lock)
109 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
110 		else
111 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
112 	}
113 }
114 
/*
 * Dump MPC and DPP CRC result registers to the debug log. Each group is
 * printed only if this ASIC actually maps the register (the bare REG()
 * lookup yields a non-zero address).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
128 
/*
 * Dump the HUBBUB watermark sets to the debug log, one row per set.
 * Cycle counts are converted to microseconds by DTN_INFO_MICRO_SEC,
 * which expands using the local dc_ctx/log_ctx variables by name.
 */
void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
			"         sr_enter          sr_exit  dram_clk_change\n");

	/* Four hardware watermark sets. */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		/* "chanage" is the (misspelled) field name declared in
		 * struct dcn_hubbub_wm_set; renaming it is out of scope here.
		 */
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
156 
157 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
158 {
159 	struct dc_context *dc_ctx = dc->ctx;
160 	struct resource_pool *pool = dc->res_pool;
161 	int i;
162 
163 	DTN_INFO(
164 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
165 	for (i = 0; i < pool->pipe_count; i++) {
166 		struct hubp *hubp = pool->hubps[i];
167 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
168 
169 		hubp->funcs->hubp_read_state(hubp);
170 
171 		if (!s->blank_en) {
172 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
173 					hubp->inst,
174 					s->pixel_format,
175 					s->inuse_addr_hi,
176 					s->viewport_width,
177 					s->viewport_height,
178 					s->rotation_angle,
179 					s->h_mirror_en,
180 					s->sw_mode,
181 					s->dcc_en,
182 					s->blank_en,
183 					s->clock_en,
184 					s->ttu_disable,
185 					s->underflow_status);
186 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
187 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
188 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
189 			DTN_INFO("\n");
190 		}
191 	}
192 
193 	DTN_INFO("\n=========RQ========\n");
194 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
195 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
196 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
197 	for (i = 0; i < pool->pipe_count; i++) {
198 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
199 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
200 
201 		if (!s->blank_en)
202 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
203 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
204 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
205 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
206 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
207 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
208 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
209 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
210 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
211 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
212 	}
213 
214 	DTN_INFO("========DLG========\n");
215 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
216 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
217 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
218 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
219 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
220 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
221 			"  x_rp_dlay  x_rr_sfl\n");
222 	for (i = 0; i < pool->pipe_count; i++) {
223 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
224 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
225 
226 		if (!s->blank_en)
227 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
228 				"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
229 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
230 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
231 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
232 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
233 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
234 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
235 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
236 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
237 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
238 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
239 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
240 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
241 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
242 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
243 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
244 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
245 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
246 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
247 				dlg_regs->xfc_reg_remote_surface_flip_latency);
248 	}
249 
250 	DTN_INFO("========TTU========\n");
251 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
252 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
253 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
254 	for (i = 0; i < pool->pipe_count; i++) {
255 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
256 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
257 
258 		if (!s->blank_en)
259 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
260 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
261 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
262 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
263 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
264 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
265 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
266 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
267 	}
268 	DTN_INFO("\n");
269 }
270 
271 void dcn10_log_hw_state(struct dc *dc,
272 	struct dc_log_buffer_ctx *log_ctx)
273 {
274 	struct dc_context *dc_ctx = dc->ctx;
275 	struct resource_pool *pool = dc->res_pool;
276 	int i;
277 
278 	DTN_INFO_BEGIN();
279 
280 	dcn10_log_hubbub_state(dc, log_ctx);
281 
282 	dcn10_log_hubp_states(dc, log_ctx);
283 
284 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
285 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
286 			"C31 C32   C33 C34\n");
287 	for (i = 0; i < pool->pipe_count; i++) {
288 		struct dpp *dpp = pool->dpps[i];
289 		struct dcn_dpp_state s = {0};
290 
291 		dpp->funcs->dpp_read_state(dpp, &s);
292 
293 		if (!s.is_enabled)
294 			continue;
295 
296 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
297 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
298 				dpp->inst,
299 				s.igam_input_format,
300 				(s.igam_lut_mode == 0) ? "BypassFixed" :
301 					((s.igam_lut_mode == 1) ? "BypassFloat" :
302 					((s.igam_lut_mode == 2) ? "RAM" :
303 					((s.igam_lut_mode == 3) ? "RAM" :
304 								 "Unknown"))),
305 				(s.dgam_lut_mode == 0) ? "Bypass" :
306 					((s.dgam_lut_mode == 1) ? "sRGB" :
307 					((s.dgam_lut_mode == 2) ? "Ycc" :
308 					((s.dgam_lut_mode == 3) ? "RAM" :
309 					((s.dgam_lut_mode == 4) ? "RAM" :
310 								 "Unknown")))),
311 				(s.rgam_lut_mode == 0) ? "Bypass" :
312 					((s.rgam_lut_mode == 1) ? "sRGB" :
313 					((s.rgam_lut_mode == 2) ? "Ycc" :
314 					((s.rgam_lut_mode == 3) ? "RAM" :
315 					((s.rgam_lut_mode == 4) ? "RAM" :
316 								 "Unknown")))),
317 				s.gamut_remap_mode,
318 				s.gamut_remap_c11_c12,
319 				s.gamut_remap_c13_c14,
320 				s.gamut_remap_c21_c22,
321 				s.gamut_remap_c23_c24,
322 				s.gamut_remap_c31_c32,
323 				s.gamut_remap_c33_c34);
324 		DTN_INFO("\n");
325 	}
326 	DTN_INFO("\n");
327 
328 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
329 	for (i = 0; i < pool->pipe_count; i++) {
330 		struct mpcc_state s = {0};
331 
332 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
333 		if (s.opp_id != 0xf)
334 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
335 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
336 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
337 				s.idle);
338 	}
339 	DTN_INFO("\n");
340 
341 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
342 
343 	for (i = 0; i < pool->timing_generator_count; i++) {
344 		struct timing_generator *tg = pool->timing_generators[i];
345 		struct dcn_otg_state s = {0};
346 		/* Read shared OTG state registers for all DCNx */
347 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
348 
349 		/*
350 		 * For DCN2 and greater, a register on the OPP is used to
351 		 * determine if the CRTC is blanked instead of the OTG. So use
352 		 * dpg_is_blanked() if exists, otherwise fallback on otg.
353 		 *
354 		 * TODO: Implement DCN-specific read_otg_state hooks.
355 		 */
356 		if (pool->opps[i]->funcs->dpg_is_blanked)
357 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
358 		else
359 			s.blank_enabled = tg->funcs->is_blanked(tg);
360 
361 		//only print if OTG master is enabled
362 		if ((s.otg_enabled & 1) == 0)
363 			continue;
364 
365 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
366 				tg->inst,
367 				s.v_blank_start,
368 				s.v_blank_end,
369 				s.v_sync_a_start,
370 				s.v_sync_a_end,
371 				s.v_sync_a_pol,
372 				s.v_total_max,
373 				s.v_total_min,
374 				s.v_total_max_sel,
375 				s.v_total_min_sel,
376 				s.h_blank_start,
377 				s.h_blank_end,
378 				s.h_sync_a_start,
379 				s.h_sync_a_end,
380 				s.h_sync_a_pol,
381 				s.h_total,
382 				s.v_total,
383 				s.underflow_occurred_status,
384 				s.blank_enabled);
385 
386 		// Clear underflow for debug purposes
387 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
388 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
389 		// it from here without affecting the original intent.
390 		tg->funcs->clear_optc_underflow(tg);
391 	}
392 	DTN_INFO("\n");
393 
394 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
395 	// TODO: Update golden log header to reflect this name change
396 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
397 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
398 		struct display_stream_compressor *dsc = pool->dscs[i];
399 		struct dcn_dsc_state s = {0};
400 
401 		dsc->funcs->dsc_read_state(dsc, &s);
402 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
403 		dsc->inst,
404 			s.dsc_clock_en,
405 			s.dsc_slice_width,
406 			s.dsc_bits_per_pixel);
407 		DTN_INFO("\n");
408 	}
409 	DTN_INFO("\n");
410 
411 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
412 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
413 	for (i = 0; i < pool->stream_enc_count; i++) {
414 		struct stream_encoder *enc = pool->stream_enc[i];
415 		struct enc_state s = {0};
416 
417 		if (enc->funcs->enc_read_state) {
418 			enc->funcs->enc_read_state(enc, &s);
419 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
420 				enc->id,
421 				s.dsc_mode,
422 				s.sec_gsp_pps_line_num,
423 				s.vbid6_line_reference,
424 				s.vbid6_line_num,
425 				s.sec_gsp_pps_enable,
426 				s.sec_stream_enable);
427 			DTN_INFO("\n");
428 		}
429 	}
430 	DTN_INFO("\n");
431 
432 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
433 	for (i = 0; i < dc->link_count; i++) {
434 		struct link_encoder *lenc = dc->links[i]->link_enc;
435 
436 		struct link_enc_state s = {0};
437 
438 		if (lenc->funcs->read_state) {
439 			lenc->funcs->read_state(lenc, &s);
440 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
441 				i,
442 				s.dphy_fec_en,
443 				s.dphy_fec_ready_shadow,
444 				s.dphy_fec_active_status,
445 				s.dp_link_training_complete);
446 			DTN_INFO("\n");
447 		}
448 	}
449 	DTN_INFO("\n");
450 
451 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
452 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
453 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
454 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
455 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
456 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
457 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
458 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
459 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
460 
461 	log_mpc_crc(dc, log_ctx);
462 
463 	DTN_INFO_END();
464 }
465 
466 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
467 {
468 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
469 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
470 
471 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
472 		tg->funcs->clear_optc_underflow(tg);
473 		return true;
474 	}
475 
476 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
477 		hubp->funcs->hubp_clear_underflow(hubp);
478 		return true;
479 	}
480 	return false;
481 }
482 
483 void dcn10_enable_power_gating_plane(
484 	struct dce_hwseq *hws,
485 	bool enable)
486 {
487 	bool force_on = true; /* disable power gating */
488 
489 	if (enable)
490 		force_on = false;
491 
492 	/* DCHUBP0/1/2/3 */
493 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
494 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
495 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
496 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
497 
498 	/* DPP0/1/2/3 */
499 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
500 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
502 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
503 }
504 
/*
 * Take all four display controllers out of VGA mode. Returns early if
 * none of them is currently in VGA mode. After disabling, the
 * VGA_TEST_ENABLE/RENDER_START handshake below realigns DCHUBP timing
 * (see the HW engineer's note).
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do if no controller is in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
537 
/*
 * Power-gate or ungate a single DPP domain and wait for the power
 * state machine to confirm the transition.
 *
 * @hws:      hardware sequencer handle
 * @dpp_inst: DPP instance (0-3); other values trip the debugger
 * @power_on: true to ungate (power up), false to gate
 *
 * No-op when DPP power gating is disabled via debug options or when the
 * DOMAIN1_PG_CONFIG register is not mapped on this ASIC.
 */
void dcn10_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	/* PG status encoding: 0 = powered on, 2 = gated. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instances map to the odd-numbered PG domains (1/3/5/7). */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
589 
/*
 * Power-gate or ungate a single DCHUBP domain and wait for the power
 * state machine to confirm the transition.
 *
 * @hws:       hardware sequencer handle
 * @hubp_inst: HUBP instance (0-3); other values trip the debugger
 * @power_on:  true to ungate (power up), false to gate
 *
 * No-op when HUBP power gating is disabled via debug options or when
 * the DOMAIN0_PG_CONFIG register is not mapped on this ASIC.
 */
void dcn10_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	/* PG status encoding: 0 = powered on, 2 = gated. */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instances map to the even-numbered PG domains (0/2/4/6). */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
641 
/*
 * Ungate the DPP and HUBP domains for one front-end pipe. The sequence
 * is bracketed by asserting/deasserting IP_REQUEST_EN, which is required
 * around power-gating programming; skipped entirely when the
 * DC_IP_REQUEST_CNTL register is not mapped.
 */
static void power_on_plane(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
663 
/*
 * Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it (the workaround keeps HUBP0 powered/unblanked so stutter
 * can engage while all pipes are gated — see apply_DEGVIDCN10_253_wa()).
 * No-op unless the workaround is currently applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* Power-gating programming must be bracketed by IP_REQUEST_EN. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
683 
/*
 * Apply the DEGVIDCN10_253 workaround: when every HUBP in the pool is
 * power-gated, power HUBP0 back up (blank disabled) so stutter can be
 * enabled. Skipped when stutter is disabled via debug options or the
 * workaround flag is not set for this ASIC.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only act when every pipe's HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	/* Power-gating programming must be bracketed by IP_REQUEST_EN. */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
713 
/*
 * Run the VBIOS golden init sequence: global DCN init power gating,
 * then per-pipe disable. Records the self-refresh force-enable state
 * beforehand so the S0i3 workaround below can restore it if the BIOS
 * command table flipped it.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* ASIC-specific S0i3 hook may handle golden init entirely. */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Snapshot the pre-command-table state. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	/* Restore: if the command table turned force-enable on while it
	 * was off before, turn it back off (honoring disable_stutter).
	 */
	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
753 
754 static void false_optc_underflow_wa(
755 		struct dc *dc,
756 		const struct dc_stream_state *stream,
757 		struct timing_generator *tg)
758 {
759 	int i;
760 	bool underflow;
761 
762 	if (!dc->hwseq->wa.false_optc_underflow)
763 		return;
764 
765 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
766 
767 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
768 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
769 
770 		if (old_pipe_ctx->stream != stream)
771 			continue;
772 
773 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
774 	}
775 
776 	if (tg->funcs->set_blank_data_double_buffer)
777 		tg->funcs->set_blank_data_double_buffer(tg, true);
778 
779 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
780 		tg->funcs->clear_optc_underflow(tg);
781 }
782 
/*
 * Bring up the OTG for a stream: enable the OPTC clock, program the
 * pixel clock and timing, set the blank color, blank the OTG if needed,
 * then enable the CRTC. Child (bottom) pipes share the parent's back
 * end and return immediately.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if pixel-clock
 * programming or CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* The caller iterates pipes with pipe0 (the parent) first; the
	 * back end is set up for pipe0 and shared by its children, so no
	 * programming is needed for non-top pipes.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC and run the false
	 * underflow workaround for this stream.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
875 
/*
 * Tear down the back end for one pipe: disable the stream/link, release
 * the audio endpoint, and — only on the top (parent) pipe — disable ABM,
 * the CRTC, the OPTC clock and DRR. Detaches the stream from the pipe
 * context. Does nothing if no stream encoder is attached.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	/* Only detach the stream if this pipe_ctx belongs to the current
	 * state (the log below is skipped otherwise too).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
945 
946 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
947 {
948 	struct hubp *hubp ;
949 	unsigned int i;
950 	bool need_recover = true;
951 
952 	if (!dc->debug.recovery_enabled)
953 		return false;
954 
955 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
956 		struct pipe_ctx *pipe_ctx =
957 			&dc->current_state->res_ctx.pipe_ctx[i];
958 		if (pipe_ctx != NULL) {
959 			hubp = pipe_ctx->plane_res.hubp;
960 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
961 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
962 					/* one pipe underflow, we will reset all the pipes*/
963 					need_recover = true;
964 				}
965 			}
966 		}
967 	}
968 	if (!need_recover)
969 		return false;
970 	/*
971 	DCHUBP_CNTL:HUBP_BLANK_EN=1
972 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
973 	DCHUBP_CNTL:HUBP_DISABLE=1
974 	DCHUBP_CNTL:HUBP_DISABLE=0
975 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
976 	DCSURF_PRIMARY_SURFACE_ADDRESS
977 	DCHUBP_CNTL:HUBP_BLANK_EN=0
978 	*/
979 
980 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
981 		struct pipe_ctx *pipe_ctx =
982 			&dc->current_state->res_ctx.pipe_ctx[i];
983 		if (pipe_ctx != NULL) {
984 			hubp = pipe_ctx->plane_res.hubp;
985 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
986 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
987 				hubp->funcs->set_hubp_blank_en(hubp, true);
988 		}
989 	}
990 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
991 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
992 
993 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
994 		struct pipe_ctx *pipe_ctx =
995 			&dc->current_state->res_ctx.pipe_ctx[i];
996 		if (pipe_ctx != NULL) {
997 			hubp = pipe_ctx->plane_res.hubp;
998 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
999 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1000 				hubp->funcs->hubp_disable_control(hubp, true);
1001 		}
1002 	}
1003 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1004 		struct pipe_ctx *pipe_ctx =
1005 			&dc->current_state->res_ctx.pipe_ctx[i];
1006 		if (pipe_ctx != NULL) {
1007 			hubp = pipe_ctx->plane_res.hubp;
1008 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1009 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1010 				hubp->funcs->hubp_disable_control(hubp, true);
1011 		}
1012 	}
1013 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1014 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1015 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1016 		struct pipe_ctx *pipe_ctx =
1017 			&dc->current_state->res_ctx.pipe_ctx[i];
1018 		if (pipe_ctx != NULL) {
1019 			hubp = pipe_ctx->plane_res.hubp;
1020 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1021 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1022 				hubp->funcs->set_hubp_blank_en(hubp, true);
1023 		}
1024 	}
1025 	return true;
1026 
1027 }
1028 
/*
 * Sanity check: ask HUBBUB whether P-state change is still allowed.  If
 * not, optionally dump HW state, break into the debugger, run the
 * force-recovery workaround once and re-check.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): pipe_ctx is not declared in this scope; this
		 * only builds if the macro does not evaluate that argument as
		 * an expression -- confirm against the TRACE_DC_PIPE_STATE
		 * definition.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
		/*check again*/
			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1048 
1049 /* trigger HW to start disconnect plane from stream on the next vsync */
1050 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1051 {
1052 	struct dce_hwseq *hws = dc->hwseq;
1053 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1054 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1055 	struct mpc *mpc = dc->res_pool->mpc;
1056 	struct mpc_tree *mpc_tree_params;
1057 	struct mpcc *mpcc_to_remove = NULL;
1058 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1059 
1060 	mpc_tree_params = &(opp->mpc_tree_params);
1061 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1062 
1063 	/*Already reset*/
1064 	if (mpcc_to_remove == NULL)
1065 		return;
1066 
1067 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1068 	if (opp != NULL)
1069 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1070 
1071 	dc->optimized_required = true;
1072 
1073 	if (hubp->funcs->hubp_disconnect)
1074 		hubp->funcs->hubp_disconnect(hubp);
1075 
1076 	if (dc->debug.sanity_checks)
1077 		hws->funcs.verify_allow_pstate_change_high(dc);
1078 }
1079 
/*
 * Power gate one front end (a DPP + HUBP pair).
 *
 * Opens the DC IP request window, asks the hwseq callbacks (when
 * present) to power gate the DPP and HUBP, resets the DPP, then closes
 * the window again.  No-op on ASICs without a DC_IP_REQUEST_CNTL
 * register.
 *
 * @dc:   dc instance (provides hwseq registers and the logger)
 * @dpp:  DPP to power down
 * @hubp: HUBP to power down
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		dpp->funcs->dpp_reset(dpp);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}
}
1104 
1105 /* disable HW used by plane.
1106  * note:  cannot disable until disconnect is complete
1107  */
1108 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1109 {
1110 	struct dce_hwseq *hws = dc->hwseq;
1111 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1112 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1113 	int opp_id = hubp->opp_id;
1114 
1115 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1116 
1117 	hubp->funcs->hubp_clk_cntl(hubp, false);
1118 
1119 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1120 
1121 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1122 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1123 				pipe_ctx->stream_res.opp,
1124 				false);
1125 
1126 	hubp->power_gated = true;
1127 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1128 
1129 	hws->funcs.plane_atomic_power_down(dc,
1130 			pipe_ctx->plane_res.dpp,
1131 			pipe_ctx->plane_res.hubp);
1132 
1133 	pipe_ctx->stream = NULL;
1134 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1135 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1136 	pipe_ctx->top_pipe = NULL;
1137 	pipe_ctx->bottom_pipe = NULL;
1138 	pipe_ctx->plane_state = NULL;
1139 }
1140 
1141 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1142 {
1143 	struct dce_hwseq *hws = dc->hwseq;
1144 	DC_LOGGER_INIT(dc->ctx->logger);
1145 
1146 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1147 		return;
1148 
1149 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1150 
1151 	apply_DEGVIDCN10_253_wa(dc);
1152 
1153 	DC_LOG_DC("Power down front end %d\n",
1154 					pipe_ctx->pipe_idx);
1155 }
1156 
/*
 * Bring all pipes to a known, disabled state during HW init.
 *
 * Three passes over the resource pool:
 *  1. blank any timing generator the VBIOS/GOP left running;
 *  2. reset each MPCC mux to its default routing;
 *  3. wire a minimal pipe_ctx per pipe into the *current* state and run
 *     the regular disconnect + disable path so SW bookkeeping matches HW.
 * Pipes carrying a seamless-boot stream are left alone (only tg_init is
 * run on them) so a firmware-lit display keeps scanning out.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	bool can_apply_seamless_boot = false;

	/* Seamless boot applies when any stream requests the optimization. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Wire up a minimal pipe_ctx so the standard disconnect /
		 * disable helpers below can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		/* Drop the temporary wiring again; the pipe is fully off. */
		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		tg->funcs->tg_init(tg);
	}
}
1264 
/*
 * One-time hardware bring-up for the DCN10 display core.
 *
 * Sequence: init display clocks and DCCG; on FPGA (Maximus) program a
 * minimal register set and return; otherwise disable VGA, run BIOS
 * golden init, discover reference clocks, init link encoders, power
 * gate DSCs, optionally power down DP displays lit by firmware, init
 * all pipes, then audio / backlight / ABM / DMCU, and finally enable
 * clock gating and notify the clock manager of watermark ranges.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i, j;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	/* NOTE(review): never set to true within this function -- looks
	 * like a hook for resume paths that skip re-init; confirm before
	 * treating the !is_optimized_init_done branches as conditional.
	 */
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	/* FPGA emulation: minimal init only, then done. */
	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal
	 * frequency when firmware info is available.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc))
			link->link_status.link_active = true;
	}

	/* Power gate DSCs */
	if (!is_optimized_init_done) {
		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
			if (hws->funcs.dsc_pg_control != NULL)
				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
	}

	/* we want to turn off all dp displays before doing detection */
	if (dc->config.power_down_display_on_boot) {
		uint8_t dpcd_power_state = '\0';
		enum dc_status status = DC_ERROR_UNEXPECTED;

		for (i = 0; i < dc->link_count; i++) {
			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
				continue;

			/*
			 * If any of the displays are lit up turn them off.
			 * The reason is that some MST hubs cannot be turned off
			 * completely until we tell them to do so.
			 * If not turned off, then displays connected to MST hub
			 * won't light up.
			 */
			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
							&dpcd_power_state, sizeof(dpcd_power_state));
			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
				/* blank dp stream before power off receiver*/
				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);

					/* Blank the stream encoder driving this DIG front end. */
					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
						if (fe == dc->res_pool->stream_enc[j]->id) {
							dc->res_pool->stream_enc[j]->funcs->dp_blank(
										dc->res_pool->stream_enc[j]);
							break;
						}
					}
				}
				dp_receiver_power_ctrl(dc->links[i], false);
			}
		}
	}

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Init panel control and pick up the HW backlight level for ABM. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1452 
1453 /* In headless boot cases, DIG may be turned
1454  * on which causes HW/SW discrepancies.
1455  * To avoid this, power down hardware on boot
1456  * if DIG is turned on
1457  */
1458 void dcn10_power_down_on_boot(struct dc *dc)
1459 {
1460 	int i = 0;
1461 	struct dc_link *edp_link;
1462 
1463 	edp_link = get_edp_link(dc);
1464 	if (edp_link &&
1465 			edp_link->link_enc->funcs->is_dig_enabled &&
1466 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1467 			dc->hwseq->funcs.edp_backlight_control &&
1468 			dc->hwss.power_down &&
1469 			dc->hwss.edp_power_control) {
1470 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1471 		dc->hwss.power_down(dc);
1472 		dc->hwss.edp_power_control(edp_link, false);
1473 	} else {
1474 		for (i = 0; i < dc->link_count; i++) {
1475 			struct dc_link *link = dc->links[i];
1476 
1477 			if (link->link_enc->funcs->is_dig_enabled &&
1478 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1479 					dc->hwss.power_down) {
1480 				dc->hwss.power_down(dc);
1481 				break;
1482 			}
1483 
1484 		}
1485 	}
1486 
1487 	/*
1488 	 * Call update_clocks with empty context
1489 	 * to send DISPLAY_OFF
1490 	 * Otherwise DISPLAY_OFF may not be asserted
1491 	 */
1492 	if (dc->clk_mgr->funcs->set_low_power_state)
1493 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1494 }
1495 
/*
 * Reset the back end of every pipe whose stream goes away, or must be
 * fully reprogrammed, in the new state.
 *
 * @dc:      dc instance; dc->current_state holds the outgoing config
 * @context: the new state being applied
 *
 * Iterates in reverse pipe order and skips non-top pipes, since split
 * pipes share their parent's back end.  After the reset, stream gating
 * is re-enabled (when supported) and the old clock source powered down.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only top pipes own a back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1527 
1528 static bool patch_address_for_sbs_tb_stereo(
1529 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1530 {
1531 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1532 	bool sec_split = pipe_ctx->top_pipe &&
1533 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1534 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1535 		(pipe_ctx->stream->timing.timing_3d_format ==
1536 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1537 		 pipe_ctx->stream->timing.timing_3d_format ==
1538 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1539 		*addr = plane_state->address.grph_stereo.left_addr;
1540 		plane_state->address.grph_stereo.left_addr =
1541 		plane_state->address.grph_stereo.right_addr;
1542 		return true;
1543 	} else {
1544 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1545 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1546 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1547 			plane_state->address.grph_stereo.right_addr =
1548 			plane_state->address.grph_stereo.left_addr;
1549 			plane_state->address.grph_stereo.right_meta_addr =
1550 			plane_state->address.grph_stereo.left_meta_addr;
1551 		}
1552 	}
1553 	return false;
1554 }
1555 
1556 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1557 {
1558 	bool addr_patched = false;
1559 	PHYSICAL_ADDRESS_LOC addr;
1560 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1561 
1562 	if (plane_state == NULL)
1563 		return;
1564 
1565 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1566 
1567 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1568 			pipe_ctx->plane_res.hubp,
1569 			&plane_state->address,
1570 			plane_state->flip_immediate);
1571 
1572 	plane_state->status.requested_address = plane_state->address;
1573 
1574 	if (plane_state->flip_immediate)
1575 		plane_state->status.current_address = plane_state->address;
1576 
1577 	if (addr_patched)
1578 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1579 }
1580 
1581 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1582 			const struct dc_plane_state *plane_state)
1583 {
1584 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1585 	const struct dc_transfer_func *tf = NULL;
1586 	bool result = true;
1587 
1588 	if (dpp_base == NULL)
1589 		return false;
1590 
1591 	if (plane_state->in_transfer_func)
1592 		tf = plane_state->in_transfer_func;
1593 
1594 	if (plane_state->gamma_correction &&
1595 		!dpp_base->ctx->dc->debug.always_use_regamma
1596 		&& !plane_state->gamma_correction->is_identity
1597 			&& dce_use_lut(plane_state->format))
1598 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1599 
1600 	if (tf == NULL)
1601 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1602 	else if (tf->type == TF_TYPE_PREDEFINED) {
1603 		switch (tf->tf) {
1604 		case TRANSFER_FUNCTION_SRGB:
1605 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1606 			break;
1607 		case TRANSFER_FUNCTION_BT709:
1608 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1609 			break;
1610 		case TRANSFER_FUNCTION_LINEAR:
1611 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1612 			break;
1613 		case TRANSFER_FUNCTION_PQ:
1614 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1615 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1616 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1617 			result = true;
1618 			break;
1619 		default:
1620 			result = false;
1621 			break;
1622 		}
1623 	} else if (tf->type == TF_TYPE_BYPASS) {
1624 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1625 	} else {
1626 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1627 					&dpp_base->degamma_params);
1628 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1629 				&dpp_base->degamma_params);
1630 		result = true;
1631 	}
1632 
1633 	return result;
1634 }
1635 
1636 #define MAX_NUM_HW_POINTS 0x200
1637 
1638 static void log_tf(struct dc_context *ctx,
1639 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1640 {
1641 	// DC_LOG_GAMMA is default logging of all hw points
1642 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1643 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1644 	int i = 0;
1645 
1646 	DC_LOGGER_INIT(ctx->logger);
1647 	DC_LOG_GAMMA("Gamma Correction TF");
1648 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1649 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1650 
1651 	for (i = 0; i < hw_points_num; i++) {
1652 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1653 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1654 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1655 	}
1656 
1657 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1658 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1659 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1660 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1661 	}
1662 }
1663 
1664 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1665 				const struct dc_stream_state *stream)
1666 {
1667 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1668 
1669 	if (dpp == NULL)
1670 		return false;
1671 
1672 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1673 
1674 	if (stream->out_transfer_func &&
1675 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1676 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1677 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1678 
1679 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1680 	 * update.
1681 	 */
1682 	else if (cm_helper_translate_curve_to_hw_format(
1683 			stream->out_transfer_func,
1684 			&dpp->regamma_params, false)) {
1685 		dpp->funcs->dpp_program_regamma_pwl(
1686 				dpp,
1687 				&dpp->regamma_params, OPP_REGAMMA_USER);
1688 	} else
1689 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1690 
1691 	if (stream != NULL && stream->ctx != NULL &&
1692 			stream->out_transfer_func != NULL) {
1693 		log_tf(stream->ctx,
1694 				stream->out_transfer_func,
1695 				dpp->regamma_params.hw_points_num);
1696 	}
1697 
1698 	return true;
1699 }
1700 
1701 void dcn10_pipe_control_lock(
1702 	struct dc *dc,
1703 	struct pipe_ctx *pipe,
1704 	bool lock)
1705 {
1706 	struct dce_hwseq *hws = dc->hwseq;
1707 
1708 	/* use TG master update lock to lock everything on the TG
1709 	 * therefore only top pipe need to lock
1710 	 */
1711 	if (!pipe || pipe->top_pipe)
1712 		return;
1713 
1714 	if (dc->debug.sanity_checks)
1715 		hws->funcs.verify_allow_pstate_change_high(dc);
1716 
1717 	if (lock)
1718 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1719 	else
1720 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1721 
1722 	if (dc->debug.sanity_checks)
1723 		hws->funcs.verify_allow_pstate_change_high(dc);
1724 }
1725 
1726 /**
1727  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1728  *
1729  * Software keepout workaround to prevent cursor update locking from stalling
1730  * out cursor updates indefinitely or from old values from being retained in
1731  * the case where the viewport changes in the same frame as the cursor.
1732  *
1733  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1734  * too close to VUPDATE, then stall out until VUPDATE finishes.
1735  *
1736  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1737  *       to avoid the need for this workaround.
1738  */
1739 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1740 {
1741 	struct dc_stream_state *stream = pipe_ctx->stream;
1742 	struct crtc_position position;
1743 	uint32_t vupdate_start, vupdate_end;
1744 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1745 	unsigned int us_per_line, us_vupdate;
1746 
1747 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1748 		return;
1749 
1750 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1751 		return;
1752 
1753 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1754 				       &vupdate_end);
1755 
1756 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1757 	vpos = position.vertical_count;
1758 
1759 	/* Avoid wraparound calculation issues */
1760 	vupdate_start += stream->timing.v_total;
1761 	vupdate_end += stream->timing.v_total;
1762 	vpos += stream->timing.v_total;
1763 
1764 	if (vpos <= vupdate_start) {
1765 		/* VPOS is in VACTIVE or back porch. */
1766 		lines_to_vupdate = vupdate_start - vpos;
1767 	} else if (vpos > vupdate_end) {
1768 		/* VPOS is in the front porch. */
1769 		return;
1770 	} else {
1771 		/* VPOS is in VUPDATE. */
1772 		lines_to_vupdate = 0;
1773 	}
1774 
1775 	/* Calculate time until VUPDATE in microseconds. */
1776 	us_per_line =
1777 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1778 	us_to_vupdate = lines_to_vupdate * us_per_line;
1779 
1780 	/* 70 us is a conservative estimate of cursor update time*/
1781 	if (us_to_vupdate > 70)
1782 		return;
1783 
1784 	/* Stall out until the cursor update completes. */
1785 	if (vupdate_end < vupdate_start)
1786 		vupdate_end += stream->timing.v_total;
1787 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1788 	udelay(us_to_vupdate + us_vupdate);
1789 }
1790 
1791 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1792 {
1793 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1794 	if (!pipe || pipe->top_pipe)
1795 		return;
1796 
1797 	/* Prevent cursor lock from stalling out cursor updates. */
1798 	if (lock)
1799 		delay_cursor_until_vupdate(dc, pipe);
1800 
1801 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1802 		union dmub_hw_lock_flags hw_locks = { 0 };
1803 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1804 
1805 		hw_locks.bits.lock_cursor = 1;
1806 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1807 
1808 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1809 					lock,
1810 					&hw_locks,
1811 					&inst_flags);
1812 	} else
1813 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1814 				pipe->stream_res.opp->inst, lock);
1815 }
1816 
1817 static bool wait_for_reset_trigger_to_occur(
1818 	struct dc_context *dc_ctx,
1819 	struct timing_generator *tg)
1820 {
1821 	bool rc = false;
1822 
1823 	/* To avoid endless loop we wait at most
1824 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1825 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1826 	int i;
1827 
1828 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1829 
1830 		if (!tg->funcs->is_counter_moving(tg)) {
1831 			DC_ERROR("TG counter is not moving!\n");
1832 			break;
1833 		}
1834 
1835 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1836 			rc = true;
1837 			/* usually occurs at i=1 */
1838 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1839 					i);
1840 			break;
1841 		}
1842 
1843 		/* Wait for one frame. */
1844 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1845 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1846 	}
1847 
1848 	if (false == rc)
1849 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1850 
1851 	return rc;
1852 }
1853 
/* Synchronize the OTGs of a group of pipes to the group master
 * (grouped_pipes[0]) by arming a triggered reset on each slave OTG and
 * waiting for it to occur.
 *
 * NOTE(review): all loops start at i = 1 and the wait below dereferences
 * grouped_pipes[1], so callers are assumed to pass group_size >= 2 with
 * grouped_pipes[0] as the sync master - confirm at call sites.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Temporarily program the slave pipes' DPG with a taller vertical
	 * dimension (2*height + 1) while the reset trigger is armed; the
	 * real dimensions are restored after the sync completes below.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Arm every slave OTG to reset off the master OTG's trigger. */
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
	for (i = 1; i < group_size; i++)
		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);

	/* Restore the real DPG active dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
1901 
1902 void dcn10_enable_per_frame_crtc_position_reset(
1903 	struct dc *dc,
1904 	int group_size,
1905 	struct pipe_ctx *grouped_pipes[])
1906 {
1907 	struct dc_context *dc_ctx = dc->ctx;
1908 	int i;
1909 
1910 	DC_SYNC_INFO("Setting up\n");
1911 	for (i = 0; i < group_size; i++)
1912 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1913 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1914 					grouped_pipes[i]->stream_res.tg,
1915 					0,
1916 					&grouped_pipes[i]->stream->triggered_crtc_reset);
1917 
1918 	DC_SYNC_INFO("Waiting for trigger\n");
1919 
1920 	for (i = 0; i < group_size; i++)
1921 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1922 
1923 	DC_SYNC_INFO("Multi-display sync is complete\n");
1924 }
1925 
1926 /*static void print_rq_dlg_ttu(
1927 		struct dc *dc,
1928 		struct pipe_ctx *pipe_ctx)
1929 {
1930 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1931 			"\n============== DML TTU Output parameters [%d] ==============\n"
1932 			"qos_level_low_wm: %d, \n"
1933 			"qos_level_high_wm: %d, \n"
1934 			"min_ttu_vblank: %d, \n"
1935 			"qos_level_flip: %d, \n"
1936 			"refcyc_per_req_delivery_l: %d, \n"
1937 			"qos_level_fixed_l: %d, \n"
1938 			"qos_ramp_disable_l: %d, \n"
1939 			"refcyc_per_req_delivery_pre_l: %d, \n"
1940 			"refcyc_per_req_delivery_c: %d, \n"
1941 			"qos_level_fixed_c: %d, \n"
1942 			"qos_ramp_disable_c: %d, \n"
1943 			"refcyc_per_req_delivery_pre_c: %d\n"
1944 			"=============================================================\n",
1945 			pipe_ctx->pipe_idx,
1946 			pipe_ctx->ttu_regs.qos_level_low_wm,
1947 			pipe_ctx->ttu_regs.qos_level_high_wm,
1948 			pipe_ctx->ttu_regs.min_ttu_vblank,
1949 			pipe_ctx->ttu_regs.qos_level_flip,
1950 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1951 			pipe_ctx->ttu_regs.qos_level_fixed_l,
1952 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1953 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1954 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1955 			pipe_ctx->ttu_regs.qos_level_fixed_c,
1956 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1957 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1958 			);
1959 
1960 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1961 			"\n============== DML DLG Output parameters [%d] ==============\n"
1962 			"refcyc_h_blank_end: %d, \n"
1963 			"dlg_vblank_end: %d, \n"
1964 			"min_dst_y_next_start: %d, \n"
1965 			"refcyc_per_htotal: %d, \n"
1966 			"refcyc_x_after_scaler: %d, \n"
1967 			"dst_y_after_scaler: %d, \n"
1968 			"dst_y_prefetch: %d, \n"
1969 			"dst_y_per_vm_vblank: %d, \n"
1970 			"dst_y_per_row_vblank: %d, \n"
1971 			"ref_freq_to_pix_freq: %d, \n"
1972 			"vratio_prefetch: %d, \n"
1973 			"refcyc_per_pte_group_vblank_l: %d, \n"
1974 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1975 			"dst_y_per_pte_row_nom_l: %d, \n"
1976 			"refcyc_per_pte_group_nom_l: %d, \n",
1977 			pipe_ctx->pipe_idx,
1978 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1979 			pipe_ctx->dlg_regs.dlg_vblank_end,
1980 			pipe_ctx->dlg_regs.min_dst_y_next_start,
1981 			pipe_ctx->dlg_regs.refcyc_per_htotal,
1982 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1983 			pipe_ctx->dlg_regs.dst_y_after_scaler,
1984 			pipe_ctx->dlg_regs.dst_y_prefetch,
1985 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1986 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1987 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1988 			pipe_ctx->dlg_regs.vratio_prefetch,
1989 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1990 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1991 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1992 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1993 			);
1994 
1995 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1996 			"\ndst_y_per_meta_row_nom_l: %d, \n"
1997 			"refcyc_per_meta_chunk_nom_l: %d, \n"
1998 			"refcyc_per_line_delivery_pre_l: %d, \n"
1999 			"refcyc_per_line_delivery_l: %d, \n"
2000 			"vratio_prefetch_c: %d, \n"
2001 			"refcyc_per_pte_group_vblank_c: %d, \n"
2002 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
2003 			"dst_y_per_pte_row_nom_c: %d, \n"
2004 			"refcyc_per_pte_group_nom_c: %d, \n"
2005 			"dst_y_per_meta_row_nom_c: %d, \n"
2006 			"refcyc_per_meta_chunk_nom_c: %d, \n"
2007 			"refcyc_per_line_delivery_pre_c: %d, \n"
2008 			"refcyc_per_line_delivery_c: %d \n"
2009 			"========================================================\n",
2010 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
2011 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
2012 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
2013 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
2014 			pipe_ctx->dlg_regs.vratio_prefetch_c,
2015 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
2016 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
2017 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
2018 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
2019 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
2020 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
2021 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
2022 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
2023 			);
2024 
2025 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2026 			"\n============== DML RQ Output parameters [%d] ==============\n"
2027 			"chunk_size: %d \n"
2028 			"min_chunk_size: %d \n"
2029 			"meta_chunk_size: %d \n"
2030 			"min_meta_chunk_size: %d \n"
2031 			"dpte_group_size: %d \n"
2032 			"mpte_group_size: %d \n"
2033 			"swath_height: %d \n"
2034 			"pte_row_height_linear: %d \n"
2035 			"========================================================\n",
2036 			pipe_ctx->pipe_idx,
2037 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2038 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2039 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2040 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2041 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2042 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2043 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
2044 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
2045 			);
2046 }
2047 */
2048 
/* Read the MC system aperture (default physical page plus low/high logical
 * address bounds) out of the MMHUB registers into @apt so it can later be
 * mirrored into the HUBP via hubp_set_vm_system_aperture_settings().
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address; << 12 implies 4 KiB pages. */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* << 18: register units -> byte address.  NOTE(review): presumably
	 * 256 KiB granularity - confirm against the register spec.
	 */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2072 
/* Temporary read settings, future will get values from kmd directly.
 *
 * Read the VM context 0 page-table configuration (PTE base/start/end and
 * the protection-fault default address) from the MMHUB registers into
 * @vm0, rebasing the PTE base from UMA space into the frame-buffer
 * aperture as described below.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* << 24: SDPIF FB base/offset register units -> byte address. */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2117 
2118 
2119 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2120 {
2121 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2122 	struct vm_system_aperture_param apt = { {{ 0 } } };
2123 	struct vm_context0_param vm0 = { { { 0 } } };
2124 
2125 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2126 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2127 
2128 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2129 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2130 }
2131 
/* Power up and clock-enable the hardware blocks a plane needs (HUBP,
 * DCHUB clock, OPP pipe clock) before it is programmed.  Also undoes the
 * DEGVIDCN10-253 workaround and, for GPU-VM configs, programs the PTE VM
 * settings.  Order matters: the workaround is undone before powering the
 * plane, and clocks are enabled before any later register programming.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

/* TODO: enable/disable in dm as per update type.
	if (plane_state) {
		DC_LOG_DC(dc->ctx->logger,
				"Pipe:%d 0x%x: addr hi:0x%x, "
				"addr low:0x%x, "
				"src: %d, %d, %d,"
				" %d; dst: %d, %d, %d, %d;\n",
				pipe_ctx->pipe_idx,
				plane_state,
				plane_state->address.grph.addr.high_part,
				plane_state->address.grph.addr.low_part,
				plane_state->src_rect.x,
				plane_state->src_rect.y,
				plane_state->src_rect.width,
				plane_state->src_rect.height,
				plane_state->dst_rect.x,
				plane_state->dst_rect.y,
				plane_state->dst_rect.width,
				plane_state->dst_rect.height);

		DC_LOG_DC(dc->ctx->logger,
				"Pipe %d: width, height, x, y         format:%d\n"
				"viewport:%d, %d, %d, %d\n"
				"recout:  %d, %d, %d, %d\n",
				pipe_ctx->pipe_idx,
				plane_state->format,
				pipe_ctx->plane_res.scl_data.viewport.width,
				pipe_ctx->plane_res.scl_data.viewport.height,
				pipe_ctx->plane_res.scl_data.viewport.x,
				pipe_ctx->plane_res.scl_data.viewport.y,
				pipe_ctx->plane_res.scl_data.recout.width,
				pipe_ctx->plane_res.scl_data.recout.height,
				pipe_ctx->plane_res.scl_data.recout.x,
				pipe_ctx->plane_res.scl_data.recout.y);
		print_rq_dlg_ttu(dc, pipe_ctx);
	}
*/
	/* With GPU VM, the HUBP needs the aperture/context0 settings too. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}
}
2200 
2201 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2202 {
2203 	int i = 0;
2204 	struct dpp_grph_csc_adjustment adjust;
2205 	memset(&adjust, 0, sizeof(adjust));
2206 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2207 
2208 
2209 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2210 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2211 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2212 			adjust.temperature_matrix[i] =
2213 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2214 	} else if (pipe_ctx->plane_state &&
2215 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2216 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2217 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2218 			adjust.temperature_matrix[i] =
2219 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2220 	}
2221 
2222 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2223 }
2224 
2225 
2226 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2227 {
2228 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2229 		if (pipe_ctx->top_pipe) {
2230 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2231 
2232 			while (top->top_pipe)
2233 				top = top->top_pipe; // Traverse to top pipe_ctx
2234 			if (top->plane_state && top->plane_state->layer_index == 0)
2235 				return true; // Front MPO plane not hidden
2236 		}
2237 	}
2238 	return false;
2239 }
2240 
2241 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2242 {
2243 	// Override rear plane RGB bias to fix MPO brightness
2244 	uint16_t rgb_bias = matrix[3];
2245 
2246 	matrix[3] = 0;
2247 	matrix[7] = 0;
2248 	matrix[11] = 0;
2249 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2250 	matrix[3] = rgb_bias;
2251 	matrix[7] = rgb_bias;
2252 	matrix[11] = rgb_bias;
2253 }
2254 
2255 void dcn10_program_output_csc(struct dc *dc,
2256 		struct pipe_ctx *pipe_ctx,
2257 		enum dc_color_space colorspace,
2258 		uint16_t *matrix,
2259 		int opp_id)
2260 {
2261 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2262 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2263 
2264 			/* MPO is broken with RGB colorspaces when OCSC matrix
2265 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2266 			 * Blending adds offsets from front + rear to rear plane
2267 			 *
2268 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2269 			 * black value pixels add offset instead of rear + front
2270 			 */
2271 
2272 			int16_t rgb_bias = matrix[3];
2273 			// matrix[3/7/11] are all the same offset value
2274 
2275 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2276 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2277 			} else {
2278 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2279 			}
2280 		}
2281 	} else {
2282 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2283 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2284 	}
2285 }
2286 
2287 void dcn10_get_surface_visual_confirm_color(
2288 		const struct pipe_ctx *pipe_ctx,
2289 		struct tg_color *color)
2290 {
2291 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2292 
2293 	switch (pipe_ctx->plane_res.scl_data.format) {
2294 	case PIXEL_FORMAT_ARGB8888:
2295 		/* set border color to red */
2296 		color->color_r_cr = color_value;
2297 		break;
2298 
2299 	case PIXEL_FORMAT_ARGB2101010:
2300 		/* set border color to blue */
2301 		color->color_b_cb = color_value;
2302 		break;
2303 	case PIXEL_FORMAT_420BPP8:
2304 		/* set border color to green */
2305 		color->color_g_y = color_value;
2306 		break;
2307 	case PIXEL_FORMAT_420BPP10:
2308 		/* set border color to yellow */
2309 		color->color_g_y = color_value;
2310 		color->color_r_cr = color_value;
2311 		break;
2312 	case PIXEL_FORMAT_FP16:
2313 		/* set border color to white */
2314 		color->color_r_cr = color_value;
2315 		color->color_b_cb = color_value;
2316 		color->color_g_y = color_value;
2317 		break;
2318 	default:
2319 		break;
2320 	}
2321 }
2322 
2323 void dcn10_get_hdr_visual_confirm_color(
2324 		struct pipe_ctx *pipe_ctx,
2325 		struct tg_color *color)
2326 {
2327 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2328 
2329 	// Determine the overscan color based on the top-most (desktop) plane's context
2330 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2331 
2332 	while (top_pipe_ctx->top_pipe != NULL)
2333 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2334 
2335 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2336 	case PIXEL_FORMAT_ARGB2101010:
2337 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2338 			/* HDR10, ARGB2101010 - set border color to red */
2339 			color->color_r_cr = color_value;
2340 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2341 			/* FreeSync 2 ARGB2101010 - set border color to pink */
2342 			color->color_r_cr = color_value;
2343 			color->color_b_cb = color_value;
2344 		}
2345 		break;
2346 	case PIXEL_FORMAT_FP16:
2347 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2348 			/* HDR10, FP16 - set border color to blue */
2349 			color->color_b_cb = color_value;
2350 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2351 			/* FreeSync 2 HDR - set border color to green */
2352 			color->color_g_y = color_value;
2353 		}
2354 		break;
2355 	default:
2356 		/* SDR - set border color to Gray */
2357 		color->color_r_cr = color_value/2;
2358 		color->color_b_cb = color_value/2;
2359 		color->color_g_y = color_value/2;
2360 		break;
2361 	}
2362 }
2363 
2364 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2365 {
2366 	struct dc_bias_and_scale bns_params = {0};
2367 
2368 	// program the input csc
2369 	dpp->funcs->dpp_setup(dpp,
2370 			plane_state->format,
2371 			EXPANSION_MODE_ZERO,
2372 			plane_state->input_csc_color_matrix,
2373 			plane_state->color_space,
2374 			NULL);
2375 
2376 	//set scale and bias registers
2377 	build_prescale_params(&bns_params, plane_state);
2378 	if (dpp->funcs->dpp_program_bias_and_scale)
2379 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2380 }
2381 
/* (Re)configure the MPCC blending for a pipe: build the blend config
 * (black/visual-confirm color, alpha mode, global alpha), and on a full
 * update rewire the MPC tree by removing any stale MPCC and inserting the
 * plane fresh.  On non-full updates only the blend config is touched.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {{0}};
	/* Per-pixel alpha only applies when there is a pipe blended below. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	/* Debug visual-confirm modes override the normal black color. */
	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
		hws->funcs.get_hdr_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
		hws->funcs.get_surface_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else {
		color_space_to_black_color(
				dc, pipe_ctx->stream->output_color_space,
				&blnd_cfg.black_color);
	}

	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	/* 0xff (fully opaque) when the plane has no global alpha set. */
	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* DCN1.0 has output CM before MPC which seems to screw with
	 * pre-multiplied alpha.
	 */
	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
			pipe_ctx->stream->output_color_space)
					&& per_pixel_alpha;


	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);

	ASSERT(new_mpcc != NULL);

	/* Record the routing the HUBP now feeds. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2466 
2467 static void update_scaler(struct pipe_ctx *pipe_ctx)
2468 {
2469 	bool per_pixel_alpha =
2470 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2471 
2472 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2473 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2474 	/* scaler configuration */
2475 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2476 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2477 }
2478 
/* Program the DCHUB pipe (HUBP) and DPP for a plane according to its
 * update flags.  On full updates this covers DPPCLK selection, VTG
 * selection and DLG/TTU/RQ setup; scaler, viewport, MPCC, gamut remap
 * and output CSC are (re)programmed as their respective flags require.
 * The statement order follows the hardware programming sequence and
 * should not be rearranged.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The surface size seen by the HUBP is the scaler viewport. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Reprogram the cursor if the stream has one configured. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* Unblank the HUBP once the pipe tree is actually visible. */
	if (is_pipe_tree_visible(pipe_ctx))
		dc->hwss.set_hubp_blank(dc, pipe_ctx, false);
}
2640 
2641 void dcn10_blank_pixel_data(
2642 		struct dc *dc,
2643 		struct pipe_ctx *pipe_ctx,
2644 		bool blank)
2645 {
2646 	enum dc_color_space color_space;
2647 	struct tg_color black_color = {0};
2648 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2649 	struct dc_stream_state *stream = pipe_ctx->stream;
2650 
2651 	/* program otg blank color */
2652 	color_space = stream->output_color_space;
2653 	color_space_to_black_color(dc, color_space, &black_color);
2654 
2655 	/*
2656 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
2657 	 * alternate between Cb and Cr, so both channels need the pixel
2658 	 * value for Y
2659 	 */
2660 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2661 		black_color.color_r_cr = black_color.color_g_y;
2662 
2663 
2664 	if (stream_res->tg->funcs->set_blank_color)
2665 		stream_res->tg->funcs->set_blank_color(
2666 				stream_res->tg,
2667 				&black_color);
2668 
2669 	if (!blank) {
2670 		if (stream_res->tg->funcs->set_blank)
2671 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2672 		if (stream_res->abm) {
2673 			dc->hwss.set_pipe(pipe_ctx);
2674 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2675 		}
2676 	} else if (blank) {
2677 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2678 		if (stream_res->tg->funcs->set_blank) {
2679 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2680 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2681 		}
2682 	}
2683 }
2684 
2685 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2686 {
2687 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2688 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2689 	struct custom_float_format fmt;
2690 
2691 	fmt.exponenta_bits = 6;
2692 	fmt.mantissa_bits = 12;
2693 	fmt.sign = true;
2694 
2695 
2696 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2697 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2698 
2699 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2700 			pipe_ctx->plane_res.dpp, hw_mult);
2701 }
2702 
/*
 * Program one pipe for the given state: re-run the plane enable sequence
 * on a full update, program DCHUBP/DPP, the HDR multiplier, and the
 * input/output transfer functions as dictated by the plane update flags.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Full updates re-run the plane enable sequence first. */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* Input transfer function only needs reprogramming when it (or the
	 * gamma) changed, or on a full update.
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2731 
/*
 * Recursively program a blending tree starting at the given pipe:
 * global sync, VTG parameters, the VUPDATE interrupt and OTG blanking
 * are programmed on the top pipe only, then each pipe with a plane is
 * programmed while walking down the bottom_pipe chain.
 */
static void dcn10_program_all_pipe_in_tree(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		/* Blank the OTG when nothing in this tree is visible. */
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state != NULL)
		hws->funcs.program_pipe(dc, pipe_ctx, context);

	/* Recurse down the blending chain; guard against a self-referencing
	 * bottom pipe.
	 */
	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
}
2764 
2765 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2766 		struct dc *dc,
2767 		struct dc_state *context,
2768 		const struct dc_stream_state *stream)
2769 {
2770 	int i;
2771 
2772 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2773 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2774 		struct pipe_ctx *old_pipe_ctx =
2775 				&dc->current_state->res_ctx.pipe_ctx[i];
2776 
2777 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2778 			continue;
2779 
2780 		if (pipe_ctx->stream != stream)
2781 			continue;
2782 
2783 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2784 			return pipe_ctx;
2785 	}
2786 	return NULL;
2787 }
2788 
2789 void dcn10_wait_for_pending_cleared(struct dc *dc,
2790 		struct dc_state *context)
2791 {
2792 		struct pipe_ctx *pipe_ctx;
2793 		struct timing_generator *tg;
2794 		int i;
2795 
2796 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2797 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2798 			tg = pipe_ctx->stream_res.tg;
2799 
2800 			/*
2801 			 * Only wait for top pipe's tg penindg bit
2802 			 * Also skip if pipe is disabled.
2803 			 */
2804 			if (pipe_ctx->top_pipe ||
2805 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2806 			    !tg->funcs->is_tg_enabled(tg))
2807 				continue;
2808 
2809 			/*
2810 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2811 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2812 			 * seems to not trigger the update right away, and if we
2813 			 * lock again before VUPDATE then we don't get a separated
2814 			 * operation.
2815 			 */
2816 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2817 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2818 		}
2819 }
2820 
/*
 * Apply a new state for one stream's surfaces: optionally blank the OTG
 * (when all planes are removed), disconnect MPCCs no longer in use,
 * program the pipe tree and writeback pipes, and reprogram interdependent
 * DLG/TTU settings on other active pipes during a full update.
 */
void dcn10_apply_ctx_for_surface(
		struct dc *dc,
		const struct dc_stream_state *stream,
		int num_planes,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;
	struct timing_generator *tg;
	uint32_t underflow_check_delay_us;
	bool interdependent_update = false;
	struct pipe_ctx *top_pipe_to_program =
			dcn10_find_top_pipe_for_stream(dc, context, stream);
	DC_LOGGER_INIT(dc->ctx->logger);

	// Clear pipe_ctx flag
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		pipe_ctx->update_flags.raw = 0;
	}

	/* Nothing to do when the stream has no top pipe in this state. */
	if (!top_pipe_to_program)
		return;

	tg = top_pipe_to_program->stream_res.tg;

	interdependent_update = top_pipe_to_program->plane_state &&
		top_pipe_to_program->plane_state->update_flags.bits.full_update;

	/*
	 * Debug aid: when underflow_assert_delay_us is set (anything other
	 * than 0xFFFFFFFF), assert on underflow before and after the delay.
	 */
	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (underflow_check_delay_us != 0xFFFFFFFF)
		udelay(underflow_check_delay_us);

	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));

	if (num_planes == 0) {
		/* OTG blank before remove all front end */
		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
	}

	/* Disconnect unused mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];

		/*
		 * A pipe that had a plane on this TG in the current state
		 * but lost it (or moved TG) in the new state is disconnected
		 * here and flagged for full disable after unlock.
		 */
		if ((!pipe_ctx->plane_state ||
		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
		    old_pipe_ctx->plane_state &&
		    old_pipe_ctx->stream_res.tg == tg) {

			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
			pipe_ctx->update_flags.bits.disable = 1;

			DC_LOG_DC("Reset mpcc for pipe %d\n",
					old_pipe_ctx->pipe_idx);
		}
	}

	if (num_planes > 0)
		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);

	/* Program secondary blending tree and writeback pipes */
	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
	if (interdependent_update)
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			/* Skip inactive pipes and ones already updated */
			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
				continue;

			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs);
		}
}
2905 
/*
 * Front-end programming after the pipe lock is released:
 * - apply the false-OTG-underflow workaround for streams with no planes
 * - disable planes flagged with update_flags.bits.disable
 * - lower bandwidth once if any plane was disabled
 * - apply the DEGVIDCN10_254 hubbub workaround when required
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Workaround applies only to tree/ODM root pipes with a
		 * stream but zero planes.
		 */
		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Optimize bandwidth at most once, if any pipe was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
2940 
2941 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2942 {
2943 	uint8_t i;
2944 
2945 	for (i = 0; i < context->stream_count; i++) {
2946 		if (context->streams[i]->timing.timing_3d_format
2947 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2948 			/*
2949 			 * Disable stutter
2950 			 */
2951 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2952 			break;
2953 		}
2954 	}
2955 }
2956 
/*
 * Program clocks and watermarks ahead of applying a new state.  Clock
 * updates are skipped on FPGA environments.  The watermark programming
 * result is stored in dc->wm_optimized_required so a later
 * dcn10_optimize_bandwidth() pass knows whether it has work to do.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams -> no PHY clock required. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
2989 
/*
 * Lower clocks/watermarks after a state has been applied.  Mirrors
 * dcn10_prepare_bandwidth() but calls update_clocks with its final flag
 * set to true (prepare passes false).  Clock updates are skipped on FPGA.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* No streams -> no PHY clock required. */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
		dcn_bw_notify_pplib_of_wm_ranges(dc);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3023 
3024 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3025 		int num_pipes, unsigned int vmin, unsigned int vmax,
3026 		unsigned int vmid, unsigned int vmid_frame_number)
3027 {
3028 	int i = 0;
3029 	struct drr_params params = {0};
3030 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3031 	unsigned int event_triggers = 0x800;
3032 	// Note DRR trigger events are generated regardless of whether num frames met.
3033 	unsigned int num_frames = 2;
3034 
3035 	params.vertical_total_max = vmax;
3036 	params.vertical_total_min = vmin;
3037 	params.vertical_total_mid = vmid;
3038 	params.vertical_total_mid_frame_num = vmid_frame_number;
3039 
3040 	/* TODO: If multiple pipes are to be supported, you need
3041 	 * some GSL stuff. Static screen triggers may be programmed differently
3042 	 * as well.
3043 	 */
3044 	for (i = 0; i < num_pipes; i++) {
3045 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3046 			pipe_ctx[i]->stream_res.tg, &params);
3047 		if (vmax != 0 && vmin != 0)
3048 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3049 					pipe_ctx[i]->stream_res.tg,
3050 					event_triggers, num_frames);
3051 	}
3052 }
3053 
3054 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3055 		int num_pipes,
3056 		struct crtc_position *position)
3057 {
3058 	int i = 0;
3059 
3060 	/* TODO: handle pipes > 1
3061 	 */
3062 	for (i = 0; i < num_pipes; i++)
3063 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3064 }
3065 
3066 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3067 		int num_pipes, const struct dc_static_screen_params *params)
3068 {
3069 	unsigned int i;
3070 	unsigned int triggers = 0;
3071 
3072 	if (params->triggers.surface_update)
3073 		triggers |= 0x80;
3074 	if (params->triggers.cursor_update)
3075 		triggers |= 0x2;
3076 	if (params->triggers.force_trigger)
3077 		triggers |= 0x1;
3078 
3079 	for (i = 0; i < num_pipes; i++)
3080 		pipe_ctx[i]->stream_res.tg->funcs->
3081 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3082 					triggers, params->num_frames);
3083 }
3084 
3085 static void dcn10_config_stereo_parameters(
3086 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3087 {
3088 	enum view_3d_format view_format = stream->view_format;
3089 	enum dc_timing_3d_format timing_3d_format =\
3090 			stream->timing.timing_3d_format;
3091 	bool non_stereo_timing = false;
3092 
3093 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3094 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3095 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3096 		non_stereo_timing = true;
3097 
3098 	if (non_stereo_timing == false &&
3099 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3100 
3101 		flags->PROGRAM_STEREO         = 1;
3102 		flags->PROGRAM_POLARITY       = 1;
3103 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3104 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3105 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3106 			enum display_dongle_type dongle = \
3107 					stream->link->ddc->dongle_type;
3108 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3109 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3110 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3111 				flags->DISABLE_STEREO_DP_SYNC = 1;
3112 		}
3113 		flags->RIGHT_EYE_POLARITY =\
3114 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3115 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3116 			flags->FRAME_PACKED = 1;
3117 	}
3118 
3119 	return;
3120 }
3121 
3122 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3123 {
3124 	struct crtc_stereo_flags flags = { 0 };
3125 	struct dc_stream_state *stream = pipe_ctx->stream;
3126 
3127 	dcn10_config_stereo_parameters(stream, &flags);
3128 
3129 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3130 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3131 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3132 	} else {
3133 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3134 	}
3135 
3136 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3137 		pipe_ctx->stream_res.opp,
3138 		flags.PROGRAM_STEREO == 1,
3139 		&stream->timing);
3140 
3141 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3142 		pipe_ctx->stream_res.tg,
3143 		&stream->timing,
3144 		&flags);
3145 
3146 	return;
3147 }
3148 
3149 static struct pipe_ctx *get_pipe_ctx_by_hubp_inst(struct dc_state *context, int mpcc_inst)
3150 {
3151 	int i;
3152 
3153 	for (i = 0; i < MAX_PIPES; i++) {
3154 		if (context->res_ctx.pipe_ctx[i].plane_res.hubp
3155 				&& context->res_ctx.pipe_ctx[i].plane_res.hubp->inst == mpcc_inst) {
3156 			return &context->res_ctx.pipe_ctx[i];
3157 		}
3158 
3159 	}
3160 	ASSERT(false);
3161 	return NULL;
3162 }
3163 
/*
 * Wait for every pending MPCC disconnect on this pipe's OPP to complete,
 * then blank the corresponding HUBP.  Top/bottom pipe pointers are
 * temporarily NULLed so the blanking does not cascade to other pipes.
 */
void dcn10_wait_for_mpcc_disconnect(
		struct dc *dc,
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	int mpcc_inst;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Nothing to wait for when the pipe has no OPP. */
	if (!pipe_ctx->stream_res.opp)
		return;

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			struct pipe_ctx *restore_bottom_pipe;
			struct pipe_ctx *restore_top_pipe;
			struct pipe_ctx *inst_pipe_ctx = get_pipe_ctx_by_hubp_inst(dc->current_state, mpcc_inst);

			ASSERT(inst_pipe_ctx);
			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			/*
			 * Set top and bottom pipes NULL, as we don't want
			 * to blank those pipes when disconnecting from MPCC
			 */
			restore_bottom_pipe = inst_pipe_ctx->bottom_pipe;
			restore_top_pipe = inst_pipe_ctx->top_pipe;
			inst_pipe_ctx->top_pipe = inst_pipe_ctx->bottom_pipe = NULL;
			dc->hwss.set_hubp_blank(dc, inst_pipe_ctx, true);
			/* Restore the saved links once the blank is done. */
			inst_pipe_ctx->top_pipe = restore_top_pipe;
			inst_pipe_ctx->bottom_pipe = restore_bottom_pipe;
		}
	}

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

}
3206 
/*
 * No-op power gating stub: performs no hardware access and always
 * reports success.  All parameters are ignored.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3215 
3216 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3217 {
3218 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3219 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3220 	bool flip_pending;
3221 	struct dc *dc = plane_state->ctx->dc;
3222 
3223 	if (plane_state == NULL)
3224 		return;
3225 
3226 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3227 					pipe_ctx->plane_res.hubp);
3228 
3229 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3230 
3231 	if (!flip_pending)
3232 		plane_state->status.current_address = plane_state->status.requested_address;
3233 
3234 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3235 			tg->funcs->is_stereo_left_eye) {
3236 		plane_state->status.is_right_eye =
3237 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3238 	}
3239 
3240 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3241 		struct dce_hwseq *hwseq = dc->hwseq;
3242 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3243 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3244 
3245 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3246 			struct hubbub *hubbub = dc->res_pool->hubbub;
3247 
3248 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3249 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3250 		}
3251 	}
3252 }
3253 
/* Forward DCHUB init data to hubbub, which owns this sequence on DCN. */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
3261 
3262 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3263 {
3264 	struct pipe_ctx *test_pipe;
3265 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3266 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3267 
3268 	/**
3269 	 * Disable the cursor if there's another pipe above this with a
3270 	 * plane that contains this pipe's viewport to prevent double cursor
3271 	 * and incorrect scaling artifacts.
3272 	 */
3273 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3274 	     test_pipe = test_pipe->top_pipe) {
3275 		if (!test_pipe->plane_state->visible)
3276 			continue;
3277 
3278 		r2 = &test_pipe->plane_res.scl_data.recout;
3279 		r2_r = r2->x + r2->width;
3280 		r2_b = r2->y + r2->height;
3281 
3282 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3283 			return true;
3284 	}
3285 
3286 	return false;
3287 }
3288 
/*
 * Program the cursor position into HUBP and DPP.  The stream-space
 * position is translated to plane space (accounting for scaling and,
 * optionally, source-rect clipping), negative coordinates are folded
 * into the hotspot, and 90/180/270 degree plane rotation plus pipe-split
 * and ODM-combine layouts are compensated for.
 */
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
			pipe_ctx->plane_state->dst_rect.width;
	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
			pipe_ctx->plane_state->dst_rect.height;

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* Cursor is not supported on video-progressive surfaces here. */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	/* An upper pipe fully covering this one draws the cursor instead. */
	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 *  pos_cpy.y as the 180 degree rotation case below,
		 *  but use pos_cpy.x as our input because we are rotating
		 *  270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			/* Normalize by the smaller of the two viewport.y values. */
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
3483 
3484 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3485 {
3486 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3487 
3488 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3489 			pipe_ctx->plane_res.hubp, attributes);
3490 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3491 		pipe_ctx->plane_res.dpp, attributes);
3492 }
3493 
3494 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3495 {
3496 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3497 	struct fixed31_32 multiplier;
3498 	struct dpp_cursor_attributes opt_attr = { 0 };
3499 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3500 	struct custom_float_format fmt;
3501 
3502 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3503 		return;
3504 
3505 	fmt.exponenta_bits = 5;
3506 	fmt.mantissa_bits = 10;
3507 	fmt.sign = true;
3508 
3509 	if (sdr_white_level > 80) {
3510 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3511 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3512 	}
3513 
3514 	opt_attr.scale = hw_scale;
3515 	opt_attr.bias = 0;
3516 
3517 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3518 			pipe_ctx->plane_res.dpp, &opt_attr);
3519 }
3520 
3521 /*
3522  * apply_front_porch_workaround  TODO FPGA still need?
3523  *
3524  * This is a workaround for a bug that has existed since R5xx and has not been
3525  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3526  */
3527 static void apply_front_porch_workaround(
3528 	struct dc_crtc_timing *timing)
3529 {
3530 	if (timing->flags.INTERLACE == 1) {
3531 		if (timing->v_front_porch < 2)
3532 			timing->v_front_porch = 2;
3533 	} else {
3534 		if (timing->v_front_porch < 1)
3535 			timing->v_front_porch = 1;
3536 	}
3537 }
3538 
/*
 * Compute the line offset of VUPDATE relative to VSYNC for this pipe.
 * Works on a patched copy of the timing (front porch clamped by
 * apply_front_porch_workaround) and the pipe's vstartup parameter.
 * The returned value may be negative (VUPDATE before VSYNC).
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;
	int vertical_line_start;

	/* Never modify the stream's timing; patch a local copy. */
	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	vertical_line_start = asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;

	return vertical_line_start;
}
3567 
3568 void dcn10_calc_vupdate_position(
3569 		struct dc *dc,
3570 		struct pipe_ctx *pipe_ctx,
3571 		uint32_t *start_line,
3572 		uint32_t *end_line)
3573 {
3574 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3575 	int vline_int_offset_from_vupdate =
3576 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3577 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3578 	int start_position;
3579 
3580 	if (vline_int_offset_from_vupdate > 0)
3581 		vline_int_offset_from_vupdate--;
3582 	else if (vline_int_offset_from_vupdate < 0)
3583 		vline_int_offset_from_vupdate++;
3584 
3585 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3586 
3587 	if (start_position >= 0)
3588 		*start_line = start_position;
3589 	else
3590 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3591 
3592 	*end_line = *start_line + 2;
3593 
3594 	if (*end_line >= dc_crtc_timing->v_total)
3595 		*end_line = 2;
3596 }
3597 
3598 static void dcn10_cal_vline_position(
3599 		struct dc *dc,
3600 		struct pipe_ctx *pipe_ctx,
3601 		enum vline_select vline,
3602 		uint32_t *start_line,
3603 		uint32_t *end_line)
3604 {
3605 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3606 
3607 	if (vline == VLINE0)
3608 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3609 	else if (vline == VLINE1)
3610 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3611 
3612 	switch (ref_point) {
3613 	case START_V_UPDATE:
3614 		dcn10_calc_vupdate_position(
3615 				dc,
3616 				pipe_ctx,
3617 				start_line,
3618 				end_line);
3619 		break;
3620 	case START_V_SYNC:
3621 		// Suppose to do nothing because vsync is 0;
3622 		break;
3623 	default:
3624 		ASSERT(0);
3625 		break;
3626 	}
3627 }
3628 
3629 void dcn10_setup_periodic_interrupt(
3630 		struct dc *dc,
3631 		struct pipe_ctx *pipe_ctx,
3632 		enum vline_select vline)
3633 {
3634 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3635 
3636 	if (vline == VLINE0) {
3637 		uint32_t start_line = 0;
3638 		uint32_t end_line = 0;
3639 
3640 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3641 
3642 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3643 
3644 	} else if (vline == VLINE1) {
3645 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3646 				tg,
3647 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3648 	}
3649 }
3650 
3651 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3652 {
3653 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3654 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3655 
3656 	if (start_line < 0) {
3657 		ASSERT(0);
3658 		start_line = 0;
3659 	}
3660 
3661 	if (tg->funcs->setup_vertical_interrupt2)
3662 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3663 }
3664 
3665 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3666 		struct dc_link_settings *link_settings)
3667 {
3668 	struct encoder_unblank_param params = { { 0 } };
3669 	struct dc_stream_state *stream = pipe_ctx->stream;
3670 	struct dc_link *link = stream->link;
3671 	struct dce_hwseq *hws = link->dc->hwseq;
3672 
3673 	/* only 3 items below are used by unblank */
3674 	params.timing = pipe_ctx->stream->timing;
3675 
3676 	params.link_settings.link_rate = link_settings->link_rate;
3677 
3678 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3679 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3680 			params.timing.pix_clk_100hz /= 2;
3681 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3682 	}
3683 
3684 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3685 		hws->funcs.edp_backlight_control(link, true);
3686 	}
3687 }
3688 
3689 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3690 				const uint8_t *custom_sdp_message,
3691 				unsigned int sdp_message_size)
3692 {
3693 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3694 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3695 				pipe_ctx->stream_res.stream_enc,
3696 				custom_sdp_message,
3697 				sdp_message_size);
3698 	}
3699 }
3700 enum dc_status dcn10_set_clock(struct dc *dc,
3701 			enum dc_clock_type clock_type,
3702 			uint32_t clk_khz,
3703 			uint32_t stepping)
3704 {
3705 	struct dc_state *context = dc->current_state;
3706 	struct dc_clock_config clock_cfg = {0};
3707 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3708 
3709 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3710 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3711 						context, clock_type, &clock_cfg);
3712 
3713 	if (!dc->clk_mgr->funcs->get_clock)
3714 		return DC_FAIL_UNSUPPORTED_1;
3715 
3716 	if (clk_khz > clock_cfg.max_clock_khz)
3717 		return DC_FAIL_CLK_EXCEED_MAX;
3718 
3719 	if (clk_khz < clock_cfg.min_clock_khz)
3720 		return DC_FAIL_CLK_BELOW_MIN;
3721 
3722 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3723 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3724 
3725 	/*update internal request clock for update clock use*/
3726 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3727 		current_clocks->dispclk_khz = clk_khz;
3728 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3729 		current_clocks->dppclk_khz = clk_khz;
3730 	else
3731 		return DC_ERROR_UNEXPECTED;
3732 
3733 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3734 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3735 				context, true);
3736 	return DC_OK;
3737 
3738 }
3739 
3740 void dcn10_get_clock(struct dc *dc,
3741 			enum dc_clock_type clock_type,
3742 			struct dc_clock_config *clock_cfg)
3743 {
3744 	struct dc_state *context = dc->current_state;
3745 
3746 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3747 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3748 
3749 }
3750 
3751 void dcn10_set_hubp_blank(const struct dc *dc,
3752 				struct pipe_ctx *pipe_ctx,
3753 				bool blank_enable)
3754 {
3755 	pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, blank_enable);
3756 }
3757