1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dc_trace.h"
58 #include "dce/dmub_outbox.h"
59 #include "inc/dc_link_dp.h"
60 #include "inc/link_dpcd.h"
61 
62 #define DC_LOGGER_INIT(logger)
63 
64 #define CTX \
65 	hws->ctx
66 #define REG(reg)\
67 	hws->regs->reg
68 
69 #undef FN
70 #define FN(reg_name, field_name) \
71 	hws->shifts->field_name, hws->masks->field_name
72 
/* each printed field is 17 characters wide; the first two characters are spaces */
74 #define DTN_INFO_MICRO_SEC(ref_cycle) \
75 	print_microsec(dc_ctx, log_ctx, ref_cycle)
76 
77 #define GAMMA_HW_POINTS_NUM 256
78 
79 #define PGFSM_POWER_ON 0
80 #define PGFSM_POWER_OFF 2
81 
82 static void print_microsec(struct dc_context *dc_ctx,
83 			   struct dc_log_buffer_ctx *log_ctx,
84 			   uint32_t ref_cycle)
85 {
86 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
87 	static const unsigned int frac = 1000;
88 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
89 
90 	DTN_INFO("  %11d.%03d",
91 			us_x10 / frac,
92 			us_x10 % frac);
93 }
94 
95 void dcn10_lock_all_pipes(struct dc *dc,
96 	struct dc_state *context,
97 	bool lock)
98 {
99 	struct pipe_ctx *pipe_ctx;
100 	struct timing_generator *tg;
101 	int i;
102 
103 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
104 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 		tg = pipe_ctx->stream_res.tg;
106 
107 		/*
108 		 * Only lock the top pipe's tg to prevent redundant
109 		 * (un)locking. Also skip if pipe is disabled.
110 		 */
111 		if (pipe_ctx->top_pipe ||
112 		    !pipe_ctx->stream ||
113 		    !pipe_ctx->plane_state ||
114 		    !tg->funcs->is_tg_enabled(tg))
115 			continue;
116 
117 		if (lock)
118 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
119 		else
120 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
121 	}
122 }
123 
/*
 * Dump the MPC and DPP CRC result registers to the debug log.
 * Each register is only read when its offset is non-zero, i.e. when the
 * register actually exists on this ASIC's register map.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		DTN_INFO printed above reads both halves of the DPP CRC */
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
137 
/*
 * Log the four HUBBUB watermark sets (urgent, self-refresh enter/exit,
 * DRAM clock change), converting each raw cycle count to microseconds.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
				   struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
			"         sr_enter          sr_exit  dram_clk_change\n");

	/* Hardware exposes four watermark sets (A-D). */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		/* "chanage" is the field's spelling in struct dcn_hubbub_wm_set */
		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
166 
/*
 * Log the state of every HUBP in the pool: surface configuration, then the
 * RQ (request), DLG (delay/latency generator) and TTU (time-to-underflow)
 * register groups. Blanked pipes are skipped in every section.
 *
 * NOTE(review): log_ctx is typed void * here while sibling loggers take
 * struct dc_log_buffer_ctx * — presumably the same object; confirm.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the cached register snapshot before printing it. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* RQ: request size/expansion settings, luma (L:) then chroma (C:) */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* DLG: per-pipe delay/latency generator attributes */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
			"  x_rp_dlay  x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* TTU: per-pipe time-to-underflow / QoS attributes */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
280 
/*
 * Top-level hardware-state dump for DTN logging: HUBBUB watermarks, HUBPs,
 * DPPs, MPCCs, OTGs, DSCs, stream/link encoders, calculated clocks, CRCs
 * and (when present) the DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP: gamma LUT modes and 3x4 gamut remap coefficients */
	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
			"C31 C32   C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id 0xf presumably means the MPCC is unassigned — skipped */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* enc_read_state is optional; encoders without it are skipped */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be NULL (e.g. dynamically assigned encoders) */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	/* DP HPO (128b/132b) encoder sections, only on pools that have them */
	{
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
534 
535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 {
537 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
539 
540 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 		tg->funcs->clear_optc_underflow(tg);
542 		return true;
543 	}
544 
545 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 		hubp->funcs->hubp_clear_underflow(hubp);
547 		return true;
548 	}
549 	return false;
550 }
551 
552 void dcn10_enable_power_gating_plane(
553 	struct dce_hwseq *hws,
554 	bool enable)
555 {
556 	bool force_on = true; /* disable power gating */
557 
558 	if (enable)
559 		force_on = false;
560 
561 	/* DCHUBP0/1/2/3 */
562 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
563 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
564 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
566 
567 	/* DPP0/1/2/3 */
568 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
569 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
570 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
572 }
573 
/*
 * Take all four display pipes out of legacy VGA mode. No-op when no pipe
 * is currently in VGA mode.
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do unless at least one pipe is in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
606 
607 /**
608  * dcn10_dpp_pg_control - DPP power gate control.
609  *
610  * @hws: dce_hwseq reference.
611  * @dpp_inst: DPP instance reference.
612  * @power_on: true if we want to enable power gate, false otherwise.
613  *
614  * Enable or disable power gate in the specific DPP instance.
615  */
616 void dcn10_dpp_pg_control(
617 		struct dce_hwseq *hws,
618 		unsigned int dpp_inst,
619 		bool power_on)
620 {
621 	uint32_t power_gate = power_on ? 0 : 1;
622 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
623 
624 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
625 		return;
626 	if (REG(DOMAIN1_PG_CONFIG) == 0)
627 		return;
628 
629 	switch (dpp_inst) {
630 	case 0: /* DPP0 */
631 		REG_UPDATE(DOMAIN1_PG_CONFIG,
632 				DOMAIN1_POWER_GATE, power_gate);
633 
634 		REG_WAIT(DOMAIN1_PG_STATUS,
635 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
636 				1, 1000);
637 		break;
638 	case 1: /* DPP1 */
639 		REG_UPDATE(DOMAIN3_PG_CONFIG,
640 				DOMAIN3_POWER_GATE, power_gate);
641 
642 		REG_WAIT(DOMAIN3_PG_STATUS,
643 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
644 				1, 1000);
645 		break;
646 	case 2: /* DPP2 */
647 		REG_UPDATE(DOMAIN5_PG_CONFIG,
648 				DOMAIN5_POWER_GATE, power_gate);
649 
650 		REG_WAIT(DOMAIN5_PG_STATUS,
651 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
652 				1, 1000);
653 		break;
654 	case 3: /* DPP3 */
655 		REG_UPDATE(DOMAIN7_PG_CONFIG,
656 				DOMAIN7_POWER_GATE, power_gate);
657 
658 		REG_WAIT(DOMAIN7_PG_STATUS,
659 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
660 				1, 1000);
661 		break;
662 	default:
663 		BREAK_TO_DEBUGGER();
664 		break;
665 	}
666 }
667 
668 /**
669  * dcn10_hubp_pg_control - HUBP power gate control.
670  *
671  * @hws: dce_hwseq reference.
672  * @hubp_inst: DPP instance reference.
673  * @power_on: true if we want to enable power gate, false otherwise.
674  *
675  * Enable or disable power gate in the specific HUBP instance.
676  */
677 void dcn10_hubp_pg_control(
678 		struct dce_hwseq *hws,
679 		unsigned int hubp_inst,
680 		bool power_on)
681 {
682 	uint32_t power_gate = power_on ? 0 : 1;
683 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684 
685 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
686 		return;
687 	if (REG(DOMAIN0_PG_CONFIG) == 0)
688 		return;
689 
690 	switch (hubp_inst) {
691 	case 0: /* DCHUBP0 */
692 		REG_UPDATE(DOMAIN0_PG_CONFIG,
693 				DOMAIN0_POWER_GATE, power_gate);
694 
695 		REG_WAIT(DOMAIN0_PG_STATUS,
696 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
697 				1, 1000);
698 		break;
699 	case 1: /* DCHUBP1 */
700 		REG_UPDATE(DOMAIN2_PG_CONFIG,
701 				DOMAIN2_POWER_GATE, power_gate);
702 
703 		REG_WAIT(DOMAIN2_PG_STATUS,
704 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
705 				1, 1000);
706 		break;
707 	case 2: /* DCHUBP2 */
708 		REG_UPDATE(DOMAIN4_PG_CONFIG,
709 				DOMAIN4_POWER_GATE, power_gate);
710 
711 		REG_WAIT(DOMAIN4_PG_STATUS,
712 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
713 				1, 1000);
714 		break;
715 	case 3: /* DCHUBP3 */
716 		REG_UPDATE(DOMAIN6_PG_CONFIG,
717 				DOMAIN6_POWER_GATE, power_gate);
718 
719 		REG_WAIT(DOMAIN6_PG_STATUS,
720 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
721 				1, 1000);
722 		break;
723 	default:
724 		BREAK_TO_DEBUGGER();
725 		break;
726 	}
727 }
728 
/*
 * Un-gate the DPP and HUBP power domains for one front-end pipe. The
 * IP_REQUEST_EN bracket is required around PG programming; it is skipped
 * entirely on register maps without DC_IP_REQUEST_CNTL.
 */
static void power_on_plane(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		/* pg_control hooks are optional per-ASIC */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
750 
/*
 * Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it back down. No-op unless the workaround was applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* IP_REQUEST_EN bracket required around PG programming */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
770 
/*
 * DEGVIDCN10_253 workaround: when every HUBP is power gated, stutter does
 * not engage; power HUBP0 back up (blanked) so stutter can work. Applied
 * only when the workaround is enabled and stutter is not disabled.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* Only applies when every pipe in the pool is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
800 
801 void dcn10_bios_golden_init(struct dc *dc)
802 {
803 	struct dce_hwseq *hws = dc->hwseq;
804 	struct dc_bios *bp = dc->ctx->dc_bios;
805 	int i;
806 	bool allow_self_fresh_force_enable = true;
807 
808 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
809 		return;
810 
811 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
812 		allow_self_fresh_force_enable =
813 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
814 
815 
816 	/* WA for making DF sleep when idle after resume from S0i3.
817 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
818 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
819 	 * before calling command table and it changed to 1 after,
820 	 * it should be set back to 0.
821 	 */
822 
823 	/* initialize dcn global */
824 	bp->funcs->enable_disp_power_gating(bp,
825 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
826 
827 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
828 		/* initialize dcn per pipe */
829 		bp->funcs->enable_disp_power_gating(bp,
830 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
831 	}
832 
833 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
834 		if (allow_self_fresh_force_enable == false &&
835 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
836 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
837 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
838 
839 }
840 
/*
 * Workaround for a false OPTC underflow indication: wait for MPCC
 * disconnects on all pipes of this stream, re-enable blank-data double
 * buffering, and clear any underflow that was newly raised during the
 * sequence (a pre-existing underflow is deliberately left latched).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether underflow was already latched before we start. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Only clear an underflow that appeared during this workaround. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
869 
/*
 * dcn10_enable_stream_timing - Program the OTG and start the stream timing.
 *
 * Enables the OPTC clock, programs the pixel clock, the OTG timing and the
 * OTG blank color, then enables the CRTC.  Only runs for the head (top)
 * pipe of a split; child pipes share the back end programmed here.
 *
 * Return: DC_OK on success (or for a child pipe, where nothing is done);
 *         DC_ERROR_UNEXPECTED if pixel clock programming or CRTC enable
 *         fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* HDMI TMDS: take the OTG's SYMCLK reference and advance the PHY
	 * clock state machine to the matching ON state.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* If the OTG is currently unblanked, blank it and apply the false
	 * underflow workaround before the CRTC is enabled below.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
971 
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end used by a pipe.
 *
 * Disables the stream (or at least its audio) unless it is already off,
 * releases a dynamically-arbitrated audio endpoint, and — for the head
 * pipe only — disables the CRTC, the OPTC clock and DRR, since the back
 * end is shared by all pipes of a split.  Finally detaches the stream
 * from the pipe if the pipe belongs to the current state.
 *
 * @context: unused here; kept to match the hwseq callback signature.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	/* No stream encoder means the back end was never set up. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may already disable or */
		/* dpms_off status is incorrect due to fastboot
		 * feature. When system resume from S4 with second
		 * screen only, the dpms_off would be true but
		 * VBIOS lit up eDP, so check link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		if (pipe_ctx->stream_res.audio) {
			/*disable az_endpoint*/
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/*free audio*/
			if (dc->caps.dynamic_audio == true) {
				/*we have to dynamic arbitrate the audio endpoints*/
				/*we free the resource, need reset is_audio_acquired*/
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* OTG stopped: drop the SYMCLK reference taken at stream enable. */
		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer (and log) if this pipe_ctx is part
	 * of the current state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1042 
1043 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1044 {
1045 	struct hubp *hubp ;
1046 	unsigned int i;
1047 	bool need_recover = true;
1048 
1049 	if (!dc->debug.recovery_enabled)
1050 		return false;
1051 
1052 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1053 		struct pipe_ctx *pipe_ctx =
1054 			&dc->current_state->res_ctx.pipe_ctx[i];
1055 		if (pipe_ctx != NULL) {
1056 			hubp = pipe_ctx->plane_res.hubp;
1057 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1058 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1059 					/* one pipe underflow, we will reset all the pipes*/
1060 					need_recover = true;
1061 				}
1062 			}
1063 		}
1064 	}
1065 	if (!need_recover)
1066 		return false;
1067 	/*
1068 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1069 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1070 	DCHUBP_CNTL:HUBP_DISABLE=1
1071 	DCHUBP_CNTL:HUBP_DISABLE=0
1072 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1073 	DCSURF_PRIMARY_SURFACE_ADDRESS
1074 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1075 	*/
1076 
1077 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1078 		struct pipe_ctx *pipe_ctx =
1079 			&dc->current_state->res_ctx.pipe_ctx[i];
1080 		if (pipe_ctx != NULL) {
1081 			hubp = pipe_ctx->plane_res.hubp;
1082 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1083 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1084 				hubp->funcs->set_hubp_blank_en(hubp, true);
1085 		}
1086 	}
1087 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1088 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1089 
1090 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1091 		struct pipe_ctx *pipe_ctx =
1092 			&dc->current_state->res_ctx.pipe_ctx[i];
1093 		if (pipe_ctx != NULL) {
1094 			hubp = pipe_ctx->plane_res.hubp;
1095 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1096 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1097 				hubp->funcs->hubp_disable_control(hubp, true);
1098 		}
1099 	}
1100 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1101 		struct pipe_ctx *pipe_ctx =
1102 			&dc->current_state->res_ctx.pipe_ctx[i];
1103 		if (pipe_ctx != NULL) {
1104 			hubp = pipe_ctx->plane_res.hubp;
1105 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1106 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1107 				hubp->funcs->hubp_disable_control(hubp, true);
1108 		}
1109 	}
1110 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1111 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1112 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1113 		struct pipe_ctx *pipe_ctx =
1114 			&dc->current_state->res_ctx.pipe_ctx[i];
1115 		if (pipe_ctx != NULL) {
1116 			hubp = pipe_ctx->plane_res.hubp;
1117 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1118 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1119 				hubp->funcs->set_hubp_blank_en(hubp, true);
1120 		}
1121 	}
1122 	return true;
1123 
1124 }
1125 
/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state change.
 *
 * On failure, optionally dumps the HW state (should_log_hw_state is a
 * function-static flag, off by default, meant to be flipped from a
 * debugger), triggers a debugger break, then attempts the forced-recovery
 * workaround and re-checks once.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): 'pipe_ctx' is consumed as a token by the trace
		 * macro; no local of that name is in scope here — confirm the
		 * macro does not evaluate it.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1149 
1150 /* trigger HW to start disconnect plane from stream on the next vsync */
1151 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1152 {
1153 	struct dce_hwseq *hws = dc->hwseq;
1154 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1155 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1156 	struct mpc *mpc = dc->res_pool->mpc;
1157 	struct mpc_tree *mpc_tree_params;
1158 	struct mpcc *mpcc_to_remove = NULL;
1159 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1160 
1161 	mpc_tree_params = &(opp->mpc_tree_params);
1162 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1163 
1164 	/*Already reset*/
1165 	if (mpcc_to_remove == NULL)
1166 		return;
1167 
1168 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1169 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1170 	// so don't wait for MPCC_IDLE in the programming sequence
1171 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1172 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1173 
1174 	dc->optimized_required = true;
1175 
1176 	if (hubp->funcs->hubp_disconnect)
1177 		hubp->funcs->hubp_disconnect(hubp);
1178 
1179 	if (dc->debug.sanity_checks)
1180 		hws->funcs.verify_allow_pstate_change_high(dc);
1181 }
1182 
1183 /**
1184  * dcn10_plane_atomic_power_down - Power down plane components.
1185  *
1186  * @dc: dc struct reference. used for grab hwseq.
1187  * @dpp: dpp struct reference.
1188  * @hubp: hubp struct reference.
1189  *
1190  * Keep in mind that this operation requires a power gate configuration;
1191  * however, requests for switch power gate are precisely controlled to avoid
1192  * problems. For this reason, power gate request is usually disabled. This
1193  * function first needs to enable the power gate request before disabling DPP
1194  * and HUBP. Finally, it disables the power gate request again.
1195  */
1196 void dcn10_plane_atomic_power_down(struct dc *dc,
1197 		struct dpp *dpp,
1198 		struct hubp *hubp)
1199 {
1200 	struct dce_hwseq *hws = dc->hwseq;
1201 	DC_LOGGER_INIT(dc->ctx->logger);
1202 
1203 	if (REG(DC_IP_REQUEST_CNTL)) {
1204 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1205 				IP_REQUEST_EN, 1);
1206 
1207 		if (hws->funcs.dpp_pg_control)
1208 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1209 
1210 		if (hws->funcs.hubp_pg_control)
1211 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1212 
1213 		dpp->funcs->dpp_reset(dpp);
1214 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1215 				IP_REQUEST_EN, 0);
1216 		DC_LOG_DEBUG(
1217 				"Power gated front end %d\n", hubp->inst);
1218 	}
1219 }
1220 
/* disable HW used by plane.
 * note:  cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* The MPCC disconnect requested earlier must complete before the
	 * front-end clocks can be stopped.
	 */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when the hubp was attached to an OPP
	 * (0xf presumably marks an invalid opp_id — confirm against
	 * OPP_ID_INVALID) and no MPCC remains in that OPP's tree.
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Fully detach the pipe context; it no longer owns any resources. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1256 
1257 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1258 {
1259 	struct dce_hwseq *hws = dc->hwseq;
1260 	DC_LOGGER_INIT(dc->ctx->logger);
1261 
1262 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1263 		return;
1264 
1265 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1266 
1267 	apply_DEGVIDCN10_253_wa(dc);
1268 
1269 	DC_LOG_DC("Power down front end %d\n",
1270 					pipe_ctx->pipe_idx);
1271 }
1272 
/*
 * dcn10_init_pipes - Put pipes, OTGs, MPC muxes and DSCs into a known
 * disabled state at driver init.
 *
 * Any pipe carrying a seamless-boot stream is skipped so the VBIOS-lit
 * display is not blanked.  For all other pipes: blank and lock enabled
 * TGs, zero the DET sizes, reset the MPC muxes, rebuild a minimal
 * pipe_ctx so the front end can be disconnected and powered down, and
 * finally power gate every DSC that is not feeding a live OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	/* NOTE(review): this indexes pipe_ctx[i] with i < num_opp; assumes
	 * num_opp <= pipe_count — TODO confirm for all resource configs.
	 */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Rebuild a minimal pipe_ctx mapping front end i so the
		 * disconnect/disable helpers below have something to operate on.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		/* Release the lock taken in the blanking loop above. */
		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC  of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s  = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1443 
/*
 * dcn10_init_hw - One-time hardware initialization at driver load / resume.
 *
 * Initializes clocks and DCCG, takes over from VBIOS (VGA disable, BIOS
 * golden init), derives reference clocks from BIOS firmware info, powers
 * up link encoders and records which DIGs the VBIOS left enabled, blanks
 * all DP displays, optionally powers down the pipes, then initializes
 * audio, panel backlight, ABM and DMCU and re-enables clock gating.
 * The FPGA (Maximus) environment takes a reduced early-return path.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	/* NOTE(review): never set to true anywhere in this function; the
	 * guarded branches always run.  Presumably kept for parity with
	 * later DCN revisions — confirm.
	 */
	bool   is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		//Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	hws->funcs.bios_golden_init(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		/* NOTE(review): this check is always true here — the FPGA
		 * case already returned above.
		 */
		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc_link_blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1602 
1603 /* In headless boot cases, DIG may be turned
1604  * on which causes HW/SW discrepancies.
1605  * To avoid this, power down hardware on boot
1606  * if DIG is turned on
1607  */
1608 void dcn10_power_down_on_boot(struct dc *dc)
1609 {
1610 	struct dc_link *edp_links[MAX_NUM_EDP];
1611 	struct dc_link *edp_link = NULL;
1612 	int edp_num;
1613 	int i = 0;
1614 
1615 	get_edp_links(dc, edp_links, &edp_num);
1616 	if (edp_num)
1617 		edp_link = edp_links[0];
1618 
1619 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1620 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1621 			dc->hwseq->funcs.edp_backlight_control &&
1622 			dc->hwss.power_down &&
1623 			dc->hwss.edp_power_control) {
1624 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1625 		dc->hwss.power_down(dc);
1626 		dc->hwss.edp_power_control(edp_link, false);
1627 	} else {
1628 		for (i = 0; i < dc->link_count; i++) {
1629 			struct dc_link *link = dc->links[i];
1630 
1631 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1632 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1633 					dc->hwss.power_down) {
1634 				dc->hwss.power_down(dc);
1635 				break;
1636 			}
1637 
1638 		}
1639 	}
1640 
1641 	/*
1642 	 * Call update_clocks with empty context
1643 	 * to send DISPLAY_OFF
1644 	 * Otherwise DISPLAY_OFF may not be asserted
1645 	 */
1646 	if (dc->clk_mgr->funcs->set_low_power_state)
1647 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1648 }
1649 
1650 void dcn10_reset_hw_ctx_wrap(
1651 		struct dc *dc,
1652 		struct dc_state *context)
1653 {
1654 	int i;
1655 	struct dce_hwseq *hws = dc->hwseq;
1656 
1657 	/* Reset Back End*/
1658 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1659 		struct pipe_ctx *pipe_ctx_old =
1660 			&dc->current_state->res_ctx.pipe_ctx[i];
1661 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1662 
1663 		if (!pipe_ctx_old->stream)
1664 			continue;
1665 
1666 		if (pipe_ctx_old->top_pipe)
1667 			continue;
1668 
1669 		if (!pipe_ctx->stream ||
1670 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1671 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1672 
1673 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1674 			if (hws->funcs.enable_stream_gating)
1675 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1676 			if (old_clk)
1677 				old_clk->funcs->cs_power_down(old_clk);
1678 		}
1679 	}
1680 }
1681 
1682 static bool patch_address_for_sbs_tb_stereo(
1683 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1684 {
1685 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1686 	bool sec_split = pipe_ctx->top_pipe &&
1687 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1688 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1689 		(pipe_ctx->stream->timing.timing_3d_format ==
1690 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1691 		 pipe_ctx->stream->timing.timing_3d_format ==
1692 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1693 		*addr = plane_state->address.grph_stereo.left_addr;
1694 		plane_state->address.grph_stereo.left_addr =
1695 		plane_state->address.grph_stereo.right_addr;
1696 		return true;
1697 	} else {
1698 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1699 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1700 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1701 			plane_state->address.grph_stereo.right_addr =
1702 			plane_state->address.grph_stereo.left_addr;
1703 			plane_state->address.grph_stereo.right_meta_addr =
1704 			plane_state->address.grph_stereo.left_meta_addr;
1705 		}
1706 	}
1707 	return false;
1708 }
1709 
1710 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1711 {
1712 	bool addr_patched = false;
1713 	PHYSICAL_ADDRESS_LOC addr;
1714 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1715 
1716 	if (plane_state == NULL)
1717 		return;
1718 
1719 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1720 
1721 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1722 			pipe_ctx->plane_res.hubp,
1723 			&plane_state->address,
1724 			plane_state->flip_immediate);
1725 
1726 	plane_state->status.requested_address = plane_state->address;
1727 
1728 	if (plane_state->flip_immediate)
1729 		plane_state->status.current_address = plane_state->address;
1730 
1731 	if (addr_patched)
1732 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1733 }
1734 
1735 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1736 			const struct dc_plane_state *plane_state)
1737 {
1738 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1739 	const struct dc_transfer_func *tf = NULL;
1740 	bool result = true;
1741 
1742 	if (dpp_base == NULL)
1743 		return false;
1744 
1745 	if (plane_state->in_transfer_func)
1746 		tf = plane_state->in_transfer_func;
1747 
1748 	if (plane_state->gamma_correction &&
1749 		!dpp_base->ctx->dc->debug.always_use_regamma
1750 		&& !plane_state->gamma_correction->is_identity
1751 			&& dce_use_lut(plane_state->format))
1752 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1753 
1754 	if (tf == NULL)
1755 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1756 	else if (tf->type == TF_TYPE_PREDEFINED) {
1757 		switch (tf->tf) {
1758 		case TRANSFER_FUNCTION_SRGB:
1759 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1760 			break;
1761 		case TRANSFER_FUNCTION_BT709:
1762 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1763 			break;
1764 		case TRANSFER_FUNCTION_LINEAR:
1765 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1766 			break;
1767 		case TRANSFER_FUNCTION_PQ:
1768 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1769 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1770 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1771 			result = true;
1772 			break;
1773 		default:
1774 			result = false;
1775 			break;
1776 		}
1777 	} else if (tf->type == TF_TYPE_BYPASS) {
1778 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1779 	} else {
1780 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1781 					&dpp_base->degamma_params);
1782 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1783 				&dpp_base->degamma_params);
1784 		result = true;
1785 	}
1786 
1787 	return result;
1788 }
1789 
1790 #define MAX_NUM_HW_POINTS 0x200
1791 
/*
 * Log the points of a transfer function at the gamma log levels.
 * Points below @hw_points_num are the ones actually programmed to
 * hardware; points from @hw_points_num up to MAX_NUM_HW_POINTS are only
 * emitted at the more verbose DC_LOG_ALL_GAMMA level.
 */
static void log_tf(struct dc_context *ctx,
				struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Hardware points: red at the default level, green/blue only when
	 * all channels are requested.
	 */
	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	/* Remaining (non-hardware) points only at the verbose levels. */
	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
1817 
1818 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1819 				const struct dc_stream_state *stream)
1820 {
1821 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1822 
1823 	if (dpp == NULL)
1824 		return false;
1825 
1826 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1827 
1828 	if (stream->out_transfer_func &&
1829 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1830 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1831 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1832 
1833 	/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1834 	 * update.
1835 	 */
1836 	else if (cm_helper_translate_curve_to_hw_format(
1837 			stream->out_transfer_func,
1838 			&dpp->regamma_params, false)) {
1839 		dpp->funcs->dpp_program_regamma_pwl(
1840 				dpp,
1841 				&dpp->regamma_params, OPP_REGAMMA_USER);
1842 	} else
1843 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1844 
1845 	if (stream != NULL && stream->ctx != NULL &&
1846 			stream->out_transfer_func != NULL) {
1847 		log_tf(stream->ctx,
1848 				stream->out_transfer_func,
1849 				dpp->regamma_params.hw_points_num);
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 void dcn10_pipe_control_lock(
1856 	struct dc *dc,
1857 	struct pipe_ctx *pipe,
1858 	bool lock)
1859 {
1860 	struct dce_hwseq *hws = dc->hwseq;
1861 
1862 	/* use TG master update lock to lock everything on the TG
1863 	 * therefore only top pipe need to lock
1864 	 */
1865 	if (!pipe || pipe->top_pipe)
1866 		return;
1867 
1868 	if (dc->debug.sanity_checks)
1869 		hws->funcs.verify_allow_pstate_change_high(dc);
1870 
1871 	if (lock)
1872 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1873 	else
1874 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1875 
1876 	if (dc->debug.sanity_checks)
1877 		hws->funcs.verify_allow_pstate_change_high(dc);
1878 }
1879 
1880 /**
1881  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1882  *
1883  * Software keepout workaround to prevent cursor update locking from stalling
1884  * out cursor updates indefinitely or from old values from being retained in
1885  * the case where the viewport changes in the same frame as the cursor.
1886  *
1887  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1888  * too close to VUPDATE, then stall out until VUPDATE finishes.
1889  *
1890  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1891  *       to avoid the need for this workaround.
1892  */
1893 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1894 {
1895 	struct dc_stream_state *stream = pipe_ctx->stream;
1896 	struct crtc_position position;
1897 	uint32_t vupdate_start, vupdate_end;
1898 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1899 	unsigned int us_per_line, us_vupdate;
1900 
1901 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1902 		return;
1903 
1904 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1905 		return;
1906 
1907 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1908 				       &vupdate_end);
1909 
1910 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1911 	vpos = position.vertical_count;
1912 
1913 	/* Avoid wraparound calculation issues */
1914 	vupdate_start += stream->timing.v_total;
1915 	vupdate_end += stream->timing.v_total;
1916 	vpos += stream->timing.v_total;
1917 
1918 	if (vpos <= vupdate_start) {
1919 		/* VPOS is in VACTIVE or back porch. */
1920 		lines_to_vupdate = vupdate_start - vpos;
1921 	} else if (vpos > vupdate_end) {
1922 		/* VPOS is in the front porch. */
1923 		return;
1924 	} else {
1925 		/* VPOS is in VUPDATE. */
1926 		lines_to_vupdate = 0;
1927 	}
1928 
1929 	/* Calculate time until VUPDATE in microseconds. */
1930 	us_per_line =
1931 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1932 	us_to_vupdate = lines_to_vupdate * us_per_line;
1933 
1934 	/* 70 us is a conservative estimate of cursor update time*/
1935 	if (us_to_vupdate > 70)
1936 		return;
1937 
1938 	/* Stall out until the cursor update completes. */
1939 	if (vupdate_end < vupdate_start)
1940 		vupdate_end += stream->timing.v_total;
1941 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1942 	udelay(us_to_vupdate + us_vupdate);
1943 }
1944 
1945 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1946 {
1947 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1948 	if (!pipe || pipe->top_pipe)
1949 		return;
1950 
1951 	/* Prevent cursor lock from stalling out cursor updates. */
1952 	if (lock)
1953 		delay_cursor_until_vupdate(dc, pipe);
1954 
1955 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1956 		union dmub_hw_lock_flags hw_locks = { 0 };
1957 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1958 
1959 		hw_locks.bits.lock_cursor = 1;
1960 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1961 
1962 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1963 					lock,
1964 					&hw_locks,
1965 					&inst_flags);
1966 	} else
1967 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1968 				pipe->stream_res.opp->inst, lock);
1969 }
1970 
1971 static bool wait_for_reset_trigger_to_occur(
1972 	struct dc_context *dc_ctx,
1973 	struct timing_generator *tg)
1974 {
1975 	bool rc = false;
1976 
1977 	/* To avoid endless loop we wait at most
1978 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1979 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1980 	int i;
1981 
1982 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1983 
1984 		if (!tg->funcs->is_counter_moving(tg)) {
1985 			DC_ERROR("TG counter is not moving!\n");
1986 			break;
1987 		}
1988 
1989 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1990 			rc = true;
1991 			/* usually occurs at i=1 */
1992 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1993 					i);
1994 			break;
1995 		}
1996 
1997 		/* Wait for one frame. */
1998 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1999 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2000 	}
2001 
2002 	if (false == rc)
2003 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2004 
2005 	return rc;
2006 }
2007 
2008 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2009 				      uint64_t *denominator,
2010 				      bool checkUint32Bounary)
2011 {
2012 	int i;
2013 	bool ret = checkUint32Bounary == false;
2014 	uint64_t max_int32 = 0xffffffff;
2015 	uint64_t num, denom;
2016 	static const uint16_t prime_numbers[] = {
2017 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2018 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2019 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2020 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2021 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2022 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2023 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2024 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2025 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2026 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2027 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2028 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2029 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2030 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2031 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2032 	int count = ARRAY_SIZE(prime_numbers);
2033 
2034 	num = *numerator;
2035 	denom = *denominator;
2036 	for (i = 0; i < count; i++) {
2037 		uint32_t num_remainder, denom_remainder;
2038 		uint64_t num_result, denom_result;
2039 		if (checkUint32Bounary &&
2040 			num <= max_int32 && denom <= max_int32) {
2041 			ret = true;
2042 			break;
2043 		}
2044 		do {
2045 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2046 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2047 			if (num_remainder == 0 && denom_remainder == 0) {
2048 				num = num_result;
2049 				denom = denom_result;
2050 			}
2051 		} while (num_remainder == 0 && denom_remainder == 0);
2052 	}
2053 	*numerator = num;
2054 	*denominator = denom;
2055 	return ret;
2056 }
2057 
2058 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2059 {
2060 	uint32_t master_pipe_refresh_rate =
2061 		pipe->stream->timing.pix_clk_100hz * 100 /
2062 		pipe->stream->timing.h_total /
2063 		pipe->stream->timing.v_total;
2064 	return master_pipe_refresh_rate <= 30;
2065 }
2066 
2067 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2068 				 bool account_low_refresh_rate)
2069 {
2070 	uint32_t clock_divider = 1;
2071 	uint32_t numpipes = 1;
2072 
2073 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2074 		clock_divider *= 2;
2075 
2076 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2077 		clock_divider *= 2;
2078 
2079 	while (pipe->next_odm_pipe) {
2080 		pipe = pipe->next_odm_pipe;
2081 		numpipes++;
2082 	}
2083 	clock_divider *= numpipes;
2084 
2085 	return clock_divider;
2086 }
2087 
/*
 * Align the DP DTO pixel clocks of a synchronization group to the
 * embedded panel's clock, so vblanks can later be aligned.
 *
 * Returns the index of the selected master pipe in @grouped_pipes, or -1
 * when no master could be chosen (allocation failure, or no DTO override
 * configured/possible).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
				    struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* Unpack the embedded panel timing packed into the 64-bit
		 * vblank_alignment_dto_params: h_total at bits 32..46,
		 * v_total at bits 48..62, pixel clock (100Hz units) in the
		 * low 32 bits.
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read each pipe's HW timing and compute the DTO
		 * phase/modulo relative to the embedded panel's clock.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* Embedded panel is the reference and master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				/* Phase/modulo must fit the DTO's 32-bit
				 * registers; reduce the fraction by common
				 * prime factors until they do.
				 */
				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: program the DTO override on every
		 * synchronizable non-embedded pipe and record the resulting
		 * pixel clock back into the stream timing.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* No embedded panel in the group: first
				 * overridden pipe becomes the master.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2175 
/*
 * Synchronize the vblanks of a group of pipes by aligning their DP DTOs
 * to a master pipe and then aligning each TG's vblank to the master's.
 *
 * NOTE(review): DPG dimensions are programmed to 2*height+1 for the
 * duration of the alignment and restored to the real height afterwards —
 * presumably to keep the test pattern generator stable while timings
 * shift; confirm against OPP/DPG documentation.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Temporarily oversize the DPG on all slave pipes (see NOTE above). */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before (re)aligning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* master < 0 means pixel clock alignment failed; skip vblank sync. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2232 
/*
 * Synchronize the timing generators of a group of pipes by arming an OTG
 * reset trigger on every slave against pipe 0's TG, waiting for the
 * reset to occur, and then disarming the triggers.  SubVP phantom pipes
 * are excluded throughout.
 *
 * NOTE(review): this unconditionally indexes grouped_pipes[1]; callers
 * presumably always pass group_size >= 2 — confirm at the call sites.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Temporarily oversize the DPG on all slave pipes while the TGs are
	 * being reset (restored to the real height at the end).
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear sync state on every real (non-phantom) stream. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each slave TG to reset against pipe 0's TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has happened (or timed out). */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2306 
2307 void dcn10_enable_per_frame_crtc_position_reset(
2308 	struct dc *dc,
2309 	int group_size,
2310 	struct pipe_ctx *grouped_pipes[])
2311 {
2312 	struct dc_context *dc_ctx = dc->ctx;
2313 	int i;
2314 
2315 	DC_SYNC_INFO("Setting up\n");
2316 	for (i = 0; i < group_size; i++)
2317 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2318 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2319 					grouped_pipes[i]->stream_res.tg,
2320 					0,
2321 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2322 
2323 	DC_SYNC_INFO("Waiting for trigger\n");
2324 
2325 	for (i = 0; i < group_size; i++)
2326 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2327 
2328 	DC_SYNC_INFO("Multi-display sync is complete\n");
2329 }
2330 
/*
 * Read the MC VM system-aperture registers into @apt so they can be
 * mirrored into the HUBP.  The REG_GET macros resolve register offsets
 * through @hws / @hubp1's register definitions.
 *
 * NOTE(review): the shifts reconstruct byte addresses from the register
 * encodings — page number << 12 (4KB pages) and logical addresses << 18;
 * inferred from the shift amounts, confirm against the register spec.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2354 
2355 /* Temporary read settings, future will get values from kmd directly */
2356 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2357 		struct vm_context0_param *vm0,
2358 		struct dce_hwseq *hws)
2359 {
2360 	PHYSICAL_ADDRESS_LOC fb_base;
2361 	PHYSICAL_ADDRESS_LOC fb_offset;
2362 	uint32_t fb_base_value;
2363 	uint32_t fb_offset_value;
2364 
2365 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2366 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2367 
2368 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2369 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2370 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2371 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2372 
2373 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2374 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2375 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2376 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2377 
2378 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2379 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2380 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2381 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2382 
2383 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2384 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2385 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2386 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2387 
2388 	/*
2389 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
2390 	 * Therefore we need to do
2391 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2392 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2393 	 */
2394 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2395 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2396 	vm0->pte_base.quad_part += fb_base.quad_part;
2397 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2398 }
2399 
2400 
2401 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2402 {
2403 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2404 	struct vm_system_aperture_param apt = {0};
2405 	struct vm_context0_param vm0 = {0};
2406 
2407 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2408 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2409 
2410 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2411 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2412 }
2413 
/*
 * Power up and clock a plane's pipe hardware (HUBP power gate, DCHUB
 * clock, OPP pipe clock), program VM page-table settings when GPU VM is
 * in use, and enable the flip interrupt for the top pipe.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10-253 workaround before bringing the pipe up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror VM aperture/context settings into the HUBP for GPU VM. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Flip interrupt only applies to the top pipe of a tree. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2452 
2453 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2454 {
2455 	int i = 0;
2456 	struct dpp_grph_csc_adjustment adjust;
2457 	memset(&adjust, 0, sizeof(adjust));
2458 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2459 
2460 
2461 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2462 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2463 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2464 			adjust.temperature_matrix[i] =
2465 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2466 	} else if (pipe_ctx->plane_state &&
2467 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2468 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2469 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2470 			adjust.temperature_matrix[i] =
2471 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2472 	}
2473 
2474 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2475 }
2476 
2477 
2478 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2479 {
2480 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2481 		if (pipe_ctx->top_pipe) {
2482 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2483 
2484 			while (top->top_pipe)
2485 				top = top->top_pipe; // Traverse to top pipe_ctx
2486 			if (top->plane_state && top->plane_state->layer_index == 0)
2487 				return true; // Front MPO plane not hidden
2488 		}
2489 	}
2490 	return false;
2491 }
2492 
2493 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2494 {
2495 	// Override rear plane RGB bias to fix MPO brightness
2496 	uint16_t rgb_bias = matrix[3];
2497 
2498 	matrix[3] = 0;
2499 	matrix[7] = 0;
2500 	matrix[11] = 0;
2501 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2502 	matrix[3] = rgb_bias;
2503 	matrix[7] = rgb_bias;
2504 	matrix[11] = rgb_bias;
2505 }
2506 
2507 void dcn10_program_output_csc(struct dc *dc,
2508 		struct pipe_ctx *pipe_ctx,
2509 		enum dc_color_space colorspace,
2510 		uint16_t *matrix,
2511 		int opp_id)
2512 {
2513 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2514 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2515 
2516 			/* MPO is broken with RGB colorspaces when OCSC matrix
2517 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2518 			 * Blending adds offsets from front + rear to rear plane
2519 			 *
2520 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2521 			 * black value pixels add offset instead of rear + front
2522 			 */
2523 
2524 			int16_t rgb_bias = matrix[3];
2525 			// matrix[3/7/11] are all the same offset value
2526 
2527 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2528 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2529 			} else {
2530 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2531 			}
2532 		}
2533 	} else {
2534 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2535 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2536 	}
2537 }
2538 
2539 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2540 {
2541 	struct dc_bias_and_scale bns_params = {0};
2542 
2543 	// program the input csc
2544 	dpp->funcs->dpp_setup(dpp,
2545 			plane_state->format,
2546 			EXPANSION_MODE_ZERO,
2547 			plane_state->input_csc_color_matrix,
2548 			plane_state->color_space,
2549 			NULL);
2550 
2551 	//set scale and bias registers
2552 	build_prescale_params(&bns_params, plane_state);
2553 	if (dpp->funcs->dpp_program_bias_and_scale)
2554 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2555 }
2556 
2557 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2558 {
2559 	struct mpc *mpc = dc->res_pool->mpc;
2560 
2561 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2562 		get_hdr_visual_confirm_color(pipe_ctx, color);
2563 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2564 		get_surface_visual_confirm_color(pipe_ctx, color);
2565 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2566 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2567 	else
2568 		color_space_to_black_color(
2569 				dc, pipe_ctx->stream->output_color_space, color);
2570 
2571 	if (mpc->funcs->set_bg_color) {
2572 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2573 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2574 	}
2575 }
2576 
/*
 * Build the blend configuration for a pipe's plane and (re)insert it
 * into the MPC tree.  On a non-full update only the blend config is
 * refreshed; a full update removes any MPCC already bound to this slot
 * and inserts the plane anew.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when there is a bottom pipe to blend with. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC binding on the HUBP for later lookups. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2654 
2655 static void update_scaler(struct pipe_ctx *pipe_ctx)
2656 {
2657 	bool per_pixel_alpha =
2658 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2659 
2660 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2661 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2662 	/* scaler configuration */
2663 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2664 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2665 }
2666 
/*
 * Program the DCHUBP/DPP pair for one pipe according to the plane's
 * update flags: clock gating/DTO, HUBP DLG/TTU/RQ setup, MPCC blending,
 * scaler, viewport, cursor, gamut remap/output CSC, and surface config.
 * Only the sub-steps whose update flags are set are (re)programmed;
 * a full_update runs everything.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp,  change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 *                dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* Prefer a per-DPP DTO when the DCCG provides one; otherwise
		 * track the shared dppclk in the clock manager state. */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is a common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* Program DLG/TTU/RQ request arbitration for this HUBP, then
		 * the values that must stay consistent across pipes. */
		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-apply cursor state; the viewport/scaler changes above can move
	 * the cursor's plane-space position. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	/* Any change affecting surface layout requires reprogramming the
	 * HUBP surface configuration. */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* unblank only if something in the pipe tree is actually visible */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2828 
/*
 * Blank or unblank a pipe's pixel output at the OTG level.
 * Programs the blank color derived from the stream's output color space,
 * then toggles TG blanking; ABM is enabled on unblank and disabled
 * (immediately) on blank.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* unblank first, then restore ABM backlight control */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else if (blank) {
		/* stop ABM before blanking; wait for VBLANK so the blank
		 * takes effect on a frame boundary */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2872 
2873 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2874 {
2875 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2876 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2877 	struct custom_float_format fmt;
2878 
2879 	fmt.exponenta_bits = 6;
2880 	fmt.mantissa_bits = 12;
2881 	fmt.sign = true;
2882 
2883 
2884 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2885 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2886 
2887 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2888 			pipe_ctx->plane_res.dpp, hw_mult);
2889 }
2890 
/*
 * Top-level per-pipe programming entry: for a top pipe, program OTG
 * global sync/VTG and blank state; then enable the plane on full update,
 * program HUBP/DPP, the HDR multiplier, and the input/output transfer
 * functions gated by the plane's update flags.
 */
void dcn10_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* OTG-level programming only applies to the top pipe of the tree */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2938 
2939 void dcn10_wait_for_pending_cleared(struct dc *dc,
2940 		struct dc_state *context)
2941 {
2942 		struct pipe_ctx *pipe_ctx;
2943 		struct timing_generator *tg;
2944 		int i;
2945 
2946 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2947 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2948 			tg = pipe_ctx->stream_res.tg;
2949 
2950 			/*
2951 			 * Only wait for top pipe's tg penindg bit
2952 			 * Also skip if pipe is disabled.
2953 			 */
2954 			if (pipe_ctx->top_pipe ||
2955 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2956 			    !tg->funcs->is_tg_enabled(tg))
2957 				continue;
2958 
2959 			/*
2960 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2961 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2962 			 * seems to not trigger the update right away, and if we
2963 			 * lock again before VUPDATE then we don't get a separated
2964 			 * operation.
2965 			 */
2966 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2967 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2968 		}
2969 }
2970 
/*
 * Front-end programming that must run after pipe locks are released:
 * apply the OPTC false-underflow workaround on planeless streams,
 * disable planes flagged for removal, optimize bandwidth once if any
 * pipe was disabled, and apply the DEGVIDCN10-254 watermark workaround.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			/* NOTE(review): stream_status is indexed with the pipe
			 * index i here — assumes the top-pipe/stream layout keeps
			 * these aligned; verify against how context->stream_status
			 * is populated. */
			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* tear down planes that the new state marks disabled */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* one bandwidth optimization pass is enough if anything was disabled */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3005 
3006 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3007 {
3008 	uint8_t i;
3009 
3010 	for (i = 0; i < context->stream_count; i++) {
3011 		if (context->streams[i]->timing.timing_3d_format
3012 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3013 			/*
3014 			 * Disable stutter
3015 			 */
3016 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3017 			break;
3018 		}
3019 	}
3020 }
3021 
/*
 * Raise clocks/watermarks ahead of a mode/plane change.
 * Mirrors dcn10_optimize_bandwidth but calls update_clocks with
 * safe_to_lower == false and records whether watermark programming
 * left further optimization pending (dc->wm_optimized_required).
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* no streams -> PHY clock can be reported as 0 */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* false: clocks may only go up at this stage */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				false);
	}

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	/* optionally report the computed SoC clock floors to pplib */
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3061 
/*
 * Lower clocks/watermarks after a mode/plane change has settled.
 * Mirrors dcn10_prepare_bandwidth but calls update_clocks with
 * safe_to_lower == true, allowing clocks to be reduced.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* no streams -> PHY clock can be reported as 0 */
		if (context->stream_count == 0)
			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

		/* true: it is now safe to lower clocks */
		dc->clk_mgr->funcs->update_clocks(
				dc->clk_mgr,
				context,
				true);
	}

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	/* optionally report the computed SoC clock floors to pplib */
	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3102 
3103 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3104 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3105 {
3106 	int i = 0;
3107 	struct drr_params params = {0};
3108 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3109 	unsigned int event_triggers = 0x800;
3110 	// Note DRR trigger events are generated regardless of whether num frames met.
3111 	unsigned int num_frames = 2;
3112 
3113 	params.vertical_total_max = adjust.v_total_max;
3114 	params.vertical_total_min = adjust.v_total_min;
3115 	params.vertical_total_mid = adjust.v_total_mid;
3116 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3117 	/* TODO: If multiple pipes are to be supported, you need
3118 	 * some GSL stuff. Static screen triggers may be programmed differently
3119 	 * as well.
3120 	 */
3121 	for (i = 0; i < num_pipes; i++) {
3122 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3123 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3124 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3125 					pipe_ctx[i]->stream_res.tg, &params);
3126 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3127 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3128 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3129 						pipe_ctx[i]->stream_res.tg,
3130 						event_triggers, num_frames);
3131 		}
3132 	}
3133 }
3134 
3135 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3136 		int num_pipes,
3137 		struct crtc_position *position)
3138 {
3139 	int i = 0;
3140 
3141 	/* TODO: handle pipes > 1
3142 	 */
3143 	for (i = 0; i < num_pipes; i++)
3144 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3145 }
3146 
3147 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3148 		int num_pipes, const struct dc_static_screen_params *params)
3149 {
3150 	unsigned int i;
3151 	unsigned int triggers = 0;
3152 
3153 	if (params->triggers.surface_update)
3154 		triggers |= 0x80;
3155 	if (params->triggers.cursor_update)
3156 		triggers |= 0x2;
3157 	if (params->triggers.force_trigger)
3158 		triggers |= 0x1;
3159 
3160 	for (i = 0; i < num_pipes; i++)
3161 		pipe_ctx[i]->stream_res.tg->funcs->
3162 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3163 					triggers, params->num_frames);
3164 }
3165 
3166 static void dcn10_config_stereo_parameters(
3167 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3168 {
3169 	enum view_3d_format view_format = stream->view_format;
3170 	enum dc_timing_3d_format timing_3d_format =\
3171 			stream->timing.timing_3d_format;
3172 	bool non_stereo_timing = false;
3173 
3174 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3175 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3176 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3177 		non_stereo_timing = true;
3178 
3179 	if (non_stereo_timing == false &&
3180 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3181 
3182 		flags->PROGRAM_STEREO         = 1;
3183 		flags->PROGRAM_POLARITY       = 1;
3184 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3185 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3186 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3187 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3188 			enum display_dongle_type dongle = \
3189 					stream->link->ddc->dongle_type;
3190 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3191 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3192 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3193 				flags->DISABLE_STEREO_DP_SYNC = 1;
3194 		}
3195 		flags->RIGHT_EYE_POLARITY =\
3196 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3197 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3198 			flags->FRAME_PACKED = 1;
3199 	}
3200 
3201 	return;
3202 }
3203 
3204 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3205 {
3206 	struct crtc_stereo_flags flags = { 0 };
3207 	struct dc_stream_state *stream = pipe_ctx->stream;
3208 
3209 	dcn10_config_stereo_parameters(stream, &flags);
3210 
3211 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3212 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3213 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3214 	} else {
3215 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3216 	}
3217 
3218 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3219 		pipe_ctx->stream_res.opp,
3220 		flags.PROGRAM_STEREO == 1,
3221 		&stream->timing);
3222 
3223 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3224 		pipe_ctx->stream_res.tg,
3225 		&stream->timing,
3226 		&flags);
3227 
3228 	return;
3229 }
3230 
3231 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3232 {
3233 	int i;
3234 
3235 	for (i = 0; i < res_pool->pipe_count; i++) {
3236 		if (res_pool->hubps[i]->inst == mpcc_inst)
3237 			return res_pool->hubps[i];
3238 	}
3239 	ASSERT(false);
3240 	return NULL;
3241 }
3242 
3243 void dcn10_wait_for_mpcc_disconnect(
3244 		struct dc *dc,
3245 		struct resource_pool *res_pool,
3246 		struct pipe_ctx *pipe_ctx)
3247 {
3248 	struct dce_hwseq *hws = dc->hwseq;
3249 	int mpcc_inst;
3250 
3251 	if (dc->debug.sanity_checks) {
3252 		hws->funcs.verify_allow_pstate_change_high(dc);
3253 	}
3254 
3255 	if (!pipe_ctx->stream_res.opp)
3256 		return;
3257 
3258 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3259 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3260 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3261 
3262 			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3263 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3264 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3265 			hubp->funcs->set_blank(hubp, true);
3266 		}
3267 	}
3268 
3269 	if (dc->debug.sanity_checks) {
3270 		hws->funcs.verify_allow_pstate_change_high(dc);
3271 	}
3272 
3273 }
3274 
/*
 * No-op display power gating handler for DCN10: always reports success.
 * Kept so the shared hw sequencer interface has a valid callback where
 * DCN10 needs no per-controller gating action.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3283 
3284 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3285 {
3286 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3287 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3288 	bool flip_pending;
3289 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3290 
3291 	if (plane_state == NULL)
3292 		return;
3293 
3294 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3295 					pipe_ctx->plane_res.hubp);
3296 
3297 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3298 
3299 	if (!flip_pending)
3300 		plane_state->status.current_address = plane_state->status.requested_address;
3301 
3302 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3303 			tg->funcs->is_stereo_left_eye) {
3304 		plane_state->status.is_right_eye =
3305 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3306 	}
3307 
3308 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3309 		struct dce_hwseq *hwseq = dc->hwseq;
3310 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3311 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3312 
3313 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3314 			struct hubbub *hubbub = dc->res_pool->hubbub;
3315 
3316 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3317 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3318 		}
3319 	}
3320 }
3321 
/* Forward DCHUB init data to the HUBBUB block for programming. */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}
3329 
3330 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3331 {
3332 	struct pipe_ctx *test_pipe, *split_pipe;
3333 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3334 	struct rect r1 = scl_data->recout, r2, r2_half;
3335 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3336 	int cur_layer = pipe_ctx->plane_state->layer_index;
3337 
3338 	/**
3339 	 * Disable the cursor if there's another pipe above this with a
3340 	 * plane that contains this pipe's viewport to prevent double cursor
3341 	 * and incorrect scaling artifacts.
3342 	 */
3343 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3344 	     test_pipe = test_pipe->top_pipe) {
3345 		// Skip invisible layer and pipe-split plane on same layer
3346 		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3347 			continue;
3348 
3349 		r2 = test_pipe->plane_res.scl_data.recout;
3350 		r2_r = r2.x + r2.width;
3351 		r2_b = r2.y + r2.height;
3352 		split_pipe = test_pipe;
3353 
3354 		/**
3355 		 * There is another half plane on same layer because of
3356 		 * pipe-split, merge together per same height.
3357 		 */
3358 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3359 		     split_pipe = split_pipe->top_pipe)
3360 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3361 				r2_half = split_pipe->plane_res.scl_data.recout;
3362 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3363 				r2.width = r2.width + r2_half.width;
3364 				r2_r = r2.x + r2.width;
3365 				break;
3366 			}
3367 
3368 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3369 			return true;
3370 	}
3371 
3372 	return false;
3373 }
3374 
3375 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3376 {
3377 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3378 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3379 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3380 	struct dc_cursor_mi_param param = {
3381 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3382 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3383 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3384 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3385 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3386 		.rotation = pipe_ctx->plane_state->rotation,
3387 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3388 	};
3389 	bool pipe_split_on = false;
3390 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3391 		(pipe_ctx->prev_odm_pipe != NULL);
3392 
3393 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3394 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3395 	int x_pos = pos_cpy.x;
3396 	int y_pos = pos_cpy.y;
3397 
3398 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3399 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3400 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3401 			pipe_split_on = true;
3402 		}
3403 	}
3404 
3405 	/**
3406 	 * DC cursor is stream space, HW cursor is plane space and drawn
3407 	 * as part of the framebuffer.
3408 	 *
3409 	 * Cursor position can't be negative, but hotspot can be used to
3410 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3411 	 * than the cursor size.
3412 	 */
3413 
3414 	/**
3415 	 * Translate cursor from stream space to plane space.
3416 	 *
3417 	 * If the cursor is scaled then we need to scale the position
3418 	 * to be in the approximately correct place. We can't do anything
3419 	 * about the actual size being incorrect, that's a limitation of
3420 	 * the hardware.
3421 	 */
3422 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3423 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3424 				pipe_ctx->plane_state->dst_rect.width;
3425 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3426 				pipe_ctx->plane_state->dst_rect.height;
3427 	} else {
3428 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3429 				pipe_ctx->plane_state->dst_rect.width;
3430 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3431 				pipe_ctx->plane_state->dst_rect.height;
3432 	}
3433 
3434 	/**
3435 	 * If the cursor's source viewport is clipped then we need to
3436 	 * translate the cursor to appear in the correct position on
3437 	 * the screen.
3438 	 *
3439 	 * This translation isn't affected by scaling so it needs to be
3440 	 * done *after* we adjust the position for the scale factor.
3441 	 *
3442 	 * This is only done by opt-in for now since there are still
3443 	 * some usecases like tiled display that might enable the
3444 	 * cursor on both streams while expecting dc to clip it.
3445 	 */
3446 	if (pos_cpy.translate_by_source) {
3447 		x_pos += pipe_ctx->plane_state->src_rect.x;
3448 		y_pos += pipe_ctx->plane_state->src_rect.y;
3449 	}
3450 
3451 	/**
3452 	 * If the position is negative then we need to add to the hotspot
3453 	 * to shift the cursor outside the plane.
3454 	 */
3455 
3456 	if (x_pos < 0) {
3457 		pos_cpy.x_hotspot -= x_pos;
3458 		x_pos = 0;
3459 	}
3460 
3461 	if (y_pos < 0) {
3462 		pos_cpy.y_hotspot -= y_pos;
3463 		y_pos = 0;
3464 	}
3465 
3466 	pos_cpy.x = (uint32_t)x_pos;
3467 	pos_cpy.y = (uint32_t)y_pos;
3468 
3469 	if (pipe_ctx->plane_state->address.type
3470 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3471 		pos_cpy.enable = false;
3472 
3473 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3474 		pos_cpy.enable = false;
3475 
3476 
3477 	if (param.rotation == ROTATION_ANGLE_0) {
3478 		int viewport_width =
3479 			pipe_ctx->plane_res.scl_data.viewport.width;
3480 		int viewport_x =
3481 			pipe_ctx->plane_res.scl_data.viewport.x;
3482 
3483 		if (param.mirror) {
3484 			if (pipe_split_on || odm_combine_on) {
3485 				if (pos_cpy.x >= viewport_width + viewport_x) {
3486 					pos_cpy.x = 2 * viewport_width
3487 							- pos_cpy.x + 2 * viewport_x;
3488 				} else {
3489 					uint32_t temp_x = pos_cpy.x;
3490 
3491 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3492 					if (temp_x >= viewport_x +
3493 						(int)hubp->curs_attr.width || pos_cpy.x
3494 						<= (int)hubp->curs_attr.width +
3495 						pipe_ctx->plane_state->src_rect.x) {
3496 						pos_cpy.x = temp_x + viewport_width;
3497 					}
3498 				}
3499 			} else {
3500 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3501 			}
3502 		}
3503 	}
3504 	// Swap axis and mirror horizontally
3505 	else if (param.rotation == ROTATION_ANGLE_90) {
3506 		uint32_t temp_x = pos_cpy.x;
3507 
3508 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3509 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3510 		pos_cpy.y = temp_x;
3511 	}
3512 	// Swap axis and mirror vertically
3513 	else if (param.rotation == ROTATION_ANGLE_270) {
3514 		uint32_t temp_y = pos_cpy.y;
3515 		int viewport_height =
3516 			pipe_ctx->plane_res.scl_data.viewport.height;
3517 		int viewport_y =
3518 			pipe_ctx->plane_res.scl_data.viewport.y;
3519 
3520 		/**
3521 		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3522 		 * For pipe split cases:
3523 		 * - apply offset of viewport.y to normalize pos_cpy.x
3524 		 * - calculate the pos_cpy.y as before
3525 		 * - shift pos_cpy.y back by same offset to get final value
3526 		 * - since we iterate through both pipes, use the lower
3527 		 *   viewport.y for offset
3528 		 * For non pipe split cases, use the same calculation for
3529 		 *  pos_cpy.y as the 180 degree rotation case below,
3530 		 *  but use pos_cpy.x as our input because we are rotating
3531 		 *  270 degrees
3532 		 */
3533 		if (pipe_split_on || odm_combine_on) {
3534 			int pos_cpy_x_offset;
3535 			int other_pipe_viewport_y;
3536 
3537 			if (pipe_split_on) {
3538 				if (pipe_ctx->bottom_pipe) {
3539 					other_pipe_viewport_y =
3540 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3541 				} else {
3542 					other_pipe_viewport_y =
3543 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3544 				}
3545 			} else {
3546 				if (pipe_ctx->next_odm_pipe) {
3547 					other_pipe_viewport_y =
3548 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3549 				} else {
3550 					other_pipe_viewport_y =
3551 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3552 				}
3553 			}
3554 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3555 				other_pipe_viewport_y : viewport_y;
3556 			pos_cpy.x -= pos_cpy_x_offset;
3557 			if (pos_cpy.x > viewport_height) {
3558 				pos_cpy.x = pos_cpy.x - viewport_height;
3559 				pos_cpy.y = viewport_height - pos_cpy.x;
3560 			} else {
3561 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3562 			}
3563 			pos_cpy.y += pos_cpy_x_offset;
3564 		} else {
3565 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3566 		}
3567 		pos_cpy.x = temp_y;
3568 	}
3569 	// Mirror horizontally and vertically
3570 	else if (param.rotation == ROTATION_ANGLE_180) {
3571 		int viewport_width =
3572 			pipe_ctx->plane_res.scl_data.viewport.width;
3573 		int viewport_x =
3574 			pipe_ctx->plane_res.scl_data.viewport.x;
3575 
3576 		if (!param.mirror) {
3577 			if (pipe_split_on || odm_combine_on) {
3578 				if (pos_cpy.x >= viewport_width + viewport_x) {
3579 					pos_cpy.x = 2 * viewport_width
3580 							- pos_cpy.x + 2 * viewport_x;
3581 				} else {
3582 					uint32_t temp_x = pos_cpy.x;
3583 
3584 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3585 					if (temp_x >= viewport_x +
3586 						(int)hubp->curs_attr.width || pos_cpy.x
3587 						<= (int)hubp->curs_attr.width +
3588 						pipe_ctx->plane_state->src_rect.x) {
3589 						pos_cpy.x = temp_x + viewport_width;
3590 					}
3591 				}
3592 			} else {
3593 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3594 			}
3595 		}
3596 
3597 		/**
3598 		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3599 		 * Calculation:
3600 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3601 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3602 		 * Simplify it as:
3603 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3604 		 */
3605 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3606 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3607 	}
3608 
3609 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3610 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3611 }
3612 
3613 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3614 {
3615 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3616 
3617 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3618 			pipe_ctx->plane_res.hubp, attributes);
3619 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3620 		pipe_ctx->plane_res.dpp, attributes);
3621 }
3622 
3623 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3624 {
3625 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3626 	struct fixed31_32 multiplier;
3627 	struct dpp_cursor_attributes opt_attr = { 0 };
3628 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3629 	struct custom_float_format fmt;
3630 
3631 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3632 		return;
3633 
3634 	fmt.exponenta_bits = 5;
3635 	fmt.mantissa_bits = 10;
3636 	fmt.sign = true;
3637 
3638 	if (sdr_white_level > 80) {
3639 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3640 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3641 	}
3642 
3643 	opt_attr.scale = hw_scale;
3644 	opt_attr.bias = 0;
3645 
3646 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3647 			pipe_ctx->plane_res.dpp, &opt_attr);
3648 }
3649 
3650 /*
3651  * apply_front_porch_workaround  TODO FPGA still need?
3652  *
3653  * This is a workaround for a bug that has existed since R5xx and has not been
3654  * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3655  */
3656 static void apply_front_porch_workaround(
3657 	struct dc_crtc_timing *timing)
3658 {
3659 	if (timing->flags.INTERLACE == 1) {
3660 		if (timing->v_front_porch < 2)
3661 			timing->v_front_porch = 2;
3662 	} else {
3663 		if (timing->v_front_porch < 1)
3664 			timing->v_front_porch = 1;
3665 	}
3666 }
3667 
3668 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3669 {
3670 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3671 	struct dc_crtc_timing patched_crtc_timing;
3672 	int vesa_sync_start;
3673 	int asic_blank_end;
3674 	int interlace_factor;
3675 
3676 	patched_crtc_timing = *dc_crtc_timing;
3677 	apply_front_porch_workaround(&patched_crtc_timing);
3678 
3679 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3680 
3681 	vesa_sync_start = patched_crtc_timing.v_addressable +
3682 			patched_crtc_timing.v_border_bottom +
3683 			patched_crtc_timing.v_front_porch;
3684 
3685 	asic_blank_end = (patched_crtc_timing.v_total -
3686 			vesa_sync_start -
3687 			patched_crtc_timing.v_border_top)
3688 			* interlace_factor;
3689 
3690 	return asic_blank_end -
3691 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3692 }
3693 
3694 void dcn10_calc_vupdate_position(
3695 		struct dc *dc,
3696 		struct pipe_ctx *pipe_ctx,
3697 		uint32_t *start_line,
3698 		uint32_t *end_line)
3699 {
3700 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3701 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3702 
3703 	if (vupdate_pos >= 0)
3704 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3705 	else
3706 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3707 	*end_line = (*start_line + 2) % timing->v_total;
3708 }
3709 
3710 static void dcn10_cal_vline_position(
3711 		struct dc *dc,
3712 		struct pipe_ctx *pipe_ctx,
3713 		uint32_t *start_line,
3714 		uint32_t *end_line)
3715 {
3716 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3717 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3718 
3719 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3720 		if (vline_pos > 0)
3721 			vline_pos--;
3722 		else if (vline_pos < 0)
3723 			vline_pos++;
3724 
3725 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3726 		if (vline_pos >= 0)
3727 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3728 		else
3729 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3730 		*end_line = (*start_line + 2) % timing->v_total;
3731 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3732 		// vsync is line 0 so start_line is just the requested line offset
3733 		*start_line = vline_pos;
3734 		*end_line = (*start_line + 2) % timing->v_total;
3735 	} else
3736 		ASSERT(0);
3737 }
3738 
3739 void dcn10_setup_periodic_interrupt(
3740 		struct dc *dc,
3741 		struct pipe_ctx *pipe_ctx)
3742 {
3743 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3744 	uint32_t start_line = 0;
3745 	uint32_t end_line = 0;
3746 
3747 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3748 
3749 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3750 }
3751 
3752 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3753 {
3754 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3755 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3756 
3757 	if (start_line < 0) {
3758 		ASSERT(0);
3759 		start_line = 0;
3760 	}
3761 
3762 	if (tg->funcs->setup_vertical_interrupt2)
3763 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3764 }
3765 
3766 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3767 		struct dc_link_settings *link_settings)
3768 {
3769 	struct encoder_unblank_param params = {0};
3770 	struct dc_stream_state *stream = pipe_ctx->stream;
3771 	struct dc_link *link = stream->link;
3772 	struct dce_hwseq *hws = link->dc->hwseq;
3773 
3774 	/* only 3 items below are used by unblank */
3775 	params.timing = pipe_ctx->stream->timing;
3776 
3777 	params.link_settings.link_rate = link_settings->link_rate;
3778 
3779 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3780 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3781 			params.timing.pix_clk_100hz /= 2;
3782 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3783 	}
3784 
3785 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3786 		hws->funcs.edp_backlight_control(link, true);
3787 	}
3788 }
3789 
3790 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3791 				const uint8_t *custom_sdp_message,
3792 				unsigned int sdp_message_size)
3793 {
3794 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3795 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3796 				pipe_ctx->stream_res.stream_enc,
3797 				custom_sdp_message,
3798 				sdp_message_size);
3799 	}
3800 }
3801 enum dc_status dcn10_set_clock(struct dc *dc,
3802 			enum dc_clock_type clock_type,
3803 			uint32_t clk_khz,
3804 			uint32_t stepping)
3805 {
3806 	struct dc_state *context = dc->current_state;
3807 	struct dc_clock_config clock_cfg = {0};
3808 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3809 
3810 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3811 		return DC_FAIL_UNSUPPORTED_1;
3812 
3813 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3814 		context, clock_type, &clock_cfg);
3815 
3816 	if (clk_khz > clock_cfg.max_clock_khz)
3817 		return DC_FAIL_CLK_EXCEED_MAX;
3818 
3819 	if (clk_khz < clock_cfg.min_clock_khz)
3820 		return DC_FAIL_CLK_BELOW_MIN;
3821 
3822 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3823 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3824 
3825 	/*update internal request clock for update clock use*/
3826 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3827 		current_clocks->dispclk_khz = clk_khz;
3828 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3829 		current_clocks->dppclk_khz = clk_khz;
3830 	else
3831 		return DC_ERROR_UNEXPECTED;
3832 
3833 	if (dc->clk_mgr->funcs->update_clocks)
3834 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3835 				context, true);
3836 	return DC_OK;
3837 
3838 }
3839 
3840 void dcn10_get_clock(struct dc *dc,
3841 			enum dc_clock_type clock_type,
3842 			struct dc_clock_config *clock_cfg)
3843 {
3844 	struct dc_state *context = dc->current_state;
3845 
3846 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3847 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3848 
3849 }
3850 
3851 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3852 {
3853 	struct resource_pool *pool = dc->res_pool;
3854 	int i;
3855 
3856 	for (i = 0; i < pool->pipe_count; i++) {
3857 		struct hubp *hubp = pool->hubps[i];
3858 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3859 
3860 		hubp->funcs->hubp_read_state(hubp);
3861 
3862 		if (!s->blank_en)
3863 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3864 	}
3865 }
3866