xref: /openbmc/linux/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c (revision d699090510c3223641a23834b4710e2d4309a6ad)
1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59 
60 #define DC_LOGGER_INIT(logger)
61 
62 #define CTX \
63 	hws->ctx
64 #define REG(reg)\
65 	hws->regs->reg
66 
67 #undef FN
68 #define FN(reg_name, field_name) \
69 	hws->shifts->field_name, hws->masks->field_name
70 
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 	print_microsec(dc_ctx, log_ctx, ref_cycle)
74 
75 #define GAMMA_HW_POINTS_NUM 256
76 
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79 
print_microsec(struct dc_context * dc_ctx,struct dc_log_buffer_ctx * log_ctx,uint32_t ref_cycle)80 static void print_microsec(struct dc_context *dc_ctx,
81 			   struct dc_log_buffer_ctx *log_ctx,
82 			   uint32_t ref_cycle)
83 {
84 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 	static const unsigned int frac = 1000;
86 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87 
88 	DTN_INFO("  %11d.%03d",
89 			us_x10 / frac,
90 			us_x10 % frac);
91 }
92 
dcn10_lock_all_pipes(struct dc * dc,struct dc_state * context,bool lock)93 void dcn10_lock_all_pipes(struct dc *dc,
94 	struct dc_state *context,
95 	bool lock)
96 {
97 	struct pipe_ctx *pipe_ctx;
98 	struct pipe_ctx *old_pipe_ctx;
99 	struct timing_generator *tg;
100 	int i;
101 
102 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
103 		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
104 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 		tg = pipe_ctx->stream_res.tg;
106 
107 		/*
108 		 * Only lock the top pipe's tg to prevent redundant
109 		 * (un)locking. Also skip if pipe is disabled.
110 		 */
111 		if (pipe_ctx->top_pipe ||
112 		    !pipe_ctx->stream ||
113 		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
114 		    !tg->funcs->is_tg_enabled(tg) ||
115 			pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
116 			continue;
117 
118 		if (lock)
119 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
120 		else
121 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
122 	}
123 }
124 
log_mpc_crc(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)125 static void log_mpc_crc(struct dc *dc,
126 	struct dc_log_buffer_ctx *log_ctx)
127 {
128 	struct dc_context *dc_ctx = dc->ctx;
129 	struct dce_hwseq *hws = dc->hwseq;
130 
131 	if (REG(MPC_CRC_RESULT_GB))
132 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
133 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
134 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
135 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
136 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
137 }
138 
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)139 static void dcn10_log_hubbub_state(struct dc *dc,
140 				   struct dc_log_buffer_ctx *log_ctx)
141 {
142 	struct dc_context *dc_ctx = dc->ctx;
143 	struct dcn_hubbub_wm wm;
144 	int i;
145 
146 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
147 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
148 
149 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
150 			"         sr_enter          sr_exit  dram_clk_change\n");
151 
152 	for (i = 0; i < 4; i++) {
153 		struct dcn_hubbub_wm_set *s;
154 
155 		s = &wm.sets[i];
156 		DTN_INFO("WM_Set[%d]:", s->wm_set);
157 		DTN_INFO_MICRO_SEC(s->data_urgent);
158 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
159 		DTN_INFO_MICRO_SEC(s->sr_enter);
160 		DTN_INFO_MICRO_SEC(s->sr_exit);
161 		DTN_INFO_MICRO_SEC(s->dram_clk_change);
162 		DTN_INFO("\n");
163 	}
164 
165 	DTN_INFO("\n");
166 }
167 
dcn10_log_hubp_states(struct dc * dc,void * log_ctx)168 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
169 {
170 	struct dc_context *dc_ctx = dc->ctx;
171 	struct resource_pool *pool = dc->res_pool;
172 	int i;
173 
174 	DTN_INFO(
175 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
176 	for (i = 0; i < pool->pipe_count; i++) {
177 		struct hubp *hubp = pool->hubps[i];
178 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
179 
180 		hubp->funcs->hubp_read_state(hubp);
181 
182 		if (!s->blank_en) {
183 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
184 					hubp->inst,
185 					s->pixel_format,
186 					s->inuse_addr_hi,
187 					s->viewport_width,
188 					s->viewport_height,
189 					s->rotation_angle,
190 					s->h_mirror_en,
191 					s->sw_mode,
192 					s->dcc_en,
193 					s->blank_en,
194 					s->clock_en,
195 					s->ttu_disable,
196 					s->underflow_status);
197 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
198 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
199 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
200 			DTN_INFO("\n");
201 		}
202 	}
203 
204 	DTN_INFO("\n=========RQ========\n");
205 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
206 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
207 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
208 	for (i = 0; i < pool->pipe_count; i++) {
209 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
210 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
211 
212 		if (!s->blank_en)
213 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
214 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
215 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
216 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
217 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
218 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
219 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
220 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
221 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
222 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
223 	}
224 
225 	DTN_INFO("========DLG========\n");
226 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
227 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
228 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
229 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
230 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
231 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
232 			"  x_rp_dlay  x_rr_sfl\n");
233 	for (i = 0; i < pool->pipe_count; i++) {
234 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
235 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
236 
237 		if (!s->blank_en)
238 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
239 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
240 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
241 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
242 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
243 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
244 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
245 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
246 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
247 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
248 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
249 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
250 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
251 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
252 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
253 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
254 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
255 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
256 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
257 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
258 				dlg_regs->xfc_reg_remote_surface_flip_latency);
259 	}
260 
261 	DTN_INFO("========TTU========\n");
262 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
263 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
264 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
265 	for (i = 0; i < pool->pipe_count; i++) {
266 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
267 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
268 
269 		if (!s->blank_en)
270 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
271 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
272 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
273 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
274 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
275 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
276 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
277 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
278 	}
279 	DTN_INFO("\n");
280 }
281 
dcn10_log_hw_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)282 void dcn10_log_hw_state(struct dc *dc,
283 	struct dc_log_buffer_ctx *log_ctx)
284 {
285 	struct dc_context *dc_ctx = dc->ctx;
286 	struct resource_pool *pool = dc->res_pool;
287 	int i;
288 
289 	DTN_INFO_BEGIN();
290 
291 	dcn10_log_hubbub_state(dc, log_ctx);
292 
293 	dcn10_log_hubp_states(dc, log_ctx);
294 
295 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
296 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
297 			"C31 C32   C33 C34\n");
298 	for (i = 0; i < pool->pipe_count; i++) {
299 		struct dpp *dpp = pool->dpps[i];
300 		struct dcn_dpp_state s = {0};
301 
302 		dpp->funcs->dpp_read_state(dpp, &s);
303 
304 		if (!s.is_enabled)
305 			continue;
306 
307 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
308 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
309 				dpp->inst,
310 				s.igam_input_format,
311 				(s.igam_lut_mode == 0) ? "BypassFixed" :
312 					((s.igam_lut_mode == 1) ? "BypassFloat" :
313 					((s.igam_lut_mode == 2) ? "RAM" :
314 					((s.igam_lut_mode == 3) ? "RAM" :
315 								 "Unknown"))),
316 				(s.dgam_lut_mode == 0) ? "Bypass" :
317 					((s.dgam_lut_mode == 1) ? "sRGB" :
318 					((s.dgam_lut_mode == 2) ? "Ycc" :
319 					((s.dgam_lut_mode == 3) ? "RAM" :
320 					((s.dgam_lut_mode == 4) ? "RAM" :
321 								 "Unknown")))),
322 				(s.rgam_lut_mode == 0) ? "Bypass" :
323 					((s.rgam_lut_mode == 1) ? "sRGB" :
324 					((s.rgam_lut_mode == 2) ? "Ycc" :
325 					((s.rgam_lut_mode == 3) ? "RAM" :
326 					((s.rgam_lut_mode == 4) ? "RAM" :
327 								 "Unknown")))),
328 				s.gamut_remap_mode,
329 				s.gamut_remap_c11_c12,
330 				s.gamut_remap_c13_c14,
331 				s.gamut_remap_c21_c22,
332 				s.gamut_remap_c23_c24,
333 				s.gamut_remap_c31_c32,
334 				s.gamut_remap_c33_c34);
335 		DTN_INFO("\n");
336 	}
337 	DTN_INFO("\n");
338 
339 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
340 	for (i = 0; i < pool->pipe_count; i++) {
341 		struct mpcc_state s = {0};
342 
343 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
344 		if (s.opp_id != 0xf)
345 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
346 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
347 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
348 				s.idle);
349 	}
350 	DTN_INFO("\n");
351 
352 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
353 
354 	for (i = 0; i < pool->timing_generator_count; i++) {
355 		struct timing_generator *tg = pool->timing_generators[i];
356 		struct dcn_otg_state s = {0};
357 		/* Read shared OTG state registers for all DCNx */
358 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
359 
360 		/*
361 		 * For DCN2 and greater, a register on the OPP is used to
362 		 * determine if the CRTC is blanked instead of the OTG. So use
363 		 * dpg_is_blanked() if exists, otherwise fallback on otg.
364 		 *
365 		 * TODO: Implement DCN-specific read_otg_state hooks.
366 		 */
367 		if (pool->opps[i]->funcs->dpg_is_blanked)
368 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
369 		else
370 			s.blank_enabled = tg->funcs->is_blanked(tg);
371 
372 		//only print if OTG master is enabled
373 		if ((s.otg_enabled & 1) == 0)
374 			continue;
375 
376 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
377 				tg->inst,
378 				s.v_blank_start,
379 				s.v_blank_end,
380 				s.v_sync_a_start,
381 				s.v_sync_a_end,
382 				s.v_sync_a_pol,
383 				s.v_total_max,
384 				s.v_total_min,
385 				s.v_total_max_sel,
386 				s.v_total_min_sel,
387 				s.h_blank_start,
388 				s.h_blank_end,
389 				s.h_sync_a_start,
390 				s.h_sync_a_end,
391 				s.h_sync_a_pol,
392 				s.h_total,
393 				s.v_total,
394 				s.underflow_occurred_status,
395 				s.blank_enabled);
396 
397 		// Clear underflow for debug purposes
398 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
399 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
400 		// it from here without affecting the original intent.
401 		tg->funcs->clear_optc_underflow(tg);
402 	}
403 	DTN_INFO("\n");
404 
405 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
406 	// TODO: Update golden log header to reflect this name change
407 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
408 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
409 		struct display_stream_compressor *dsc = pool->dscs[i];
410 		struct dcn_dsc_state s = {0};
411 
412 		dsc->funcs->dsc_read_state(dsc, &s);
413 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
414 		dsc->inst,
415 			s.dsc_clock_en,
416 			s.dsc_slice_width,
417 			s.dsc_bits_per_pixel);
418 		DTN_INFO("\n");
419 	}
420 	DTN_INFO("\n");
421 
422 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
423 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
424 	for (i = 0; i < pool->stream_enc_count; i++) {
425 		struct stream_encoder *enc = pool->stream_enc[i];
426 		struct enc_state s = {0};
427 
428 		if (enc->funcs->enc_read_state) {
429 			enc->funcs->enc_read_state(enc, &s);
430 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
431 				enc->id,
432 				s.dsc_mode,
433 				s.sec_gsp_pps_line_num,
434 				s.vbid6_line_reference,
435 				s.vbid6_line_num,
436 				s.sec_gsp_pps_enable,
437 				s.sec_stream_enable);
438 			DTN_INFO("\n");
439 		}
440 	}
441 	DTN_INFO("\n");
442 
443 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
444 	for (i = 0; i < dc->link_count; i++) {
445 		struct link_encoder *lenc = dc->links[i]->link_enc;
446 
447 		struct link_enc_state s = {0};
448 
449 		if (lenc && lenc->funcs->read_state) {
450 			lenc->funcs->read_state(lenc, &s);
451 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
452 				i,
453 				s.dphy_fec_en,
454 				s.dphy_fec_ready_shadow,
455 				s.dphy_fec_active_status,
456 				s.dp_link_training_complete);
457 			DTN_INFO("\n");
458 		}
459 	}
460 	DTN_INFO("\n");
461 
462 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
463 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
464 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
465 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
466 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
467 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
468 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
469 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
470 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
471 
472 	log_mpc_crc(dc, log_ctx);
473 
474 	{
475 		if (pool->hpo_dp_stream_enc_count > 0) {
476 			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
477 			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
478 				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
479 				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
480 
481 				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
482 					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
483 
484 					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
485 							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
486 							hpo_dp_se_state.stream_enc_enabled,
487 							hpo_dp_se_state.otg_inst,
488 							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
489 									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
490 									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
491 							(hpo_dp_se_state.component_depth == 0) ? 6 :
492 									((hpo_dp_se_state.component_depth == 1) ? 8 :
493 									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
494 							hpo_dp_se_state.vid_stream_enabled,
495 							hpo_dp_se_state.sdp_enabled,
496 							hpo_dp_se_state.compressed_format,
497 							hpo_dp_se_state.mapped_to_link_enc);
498 				}
499 			}
500 
501 			DTN_INFO("\n");
502 		}
503 
504 		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
505 		if (pool->hpo_dp_link_enc_count) {
506 			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
507 
508 			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
509 				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
510 				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
511 
512 				if (hpo_dp_link_enc->funcs->read_state) {
513 					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
514 					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
515 							hpo_dp_link_enc->inst,
516 							hpo_dp_le_state.link_enc_enabled,
517 							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
518 									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
519 									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
520 							hpo_dp_le_state.lane_count,
521 							hpo_dp_le_state.stream_src[0],
522 							hpo_dp_le_state.slot_count[0],
523 							hpo_dp_le_state.vc_rate_x[0],
524 							hpo_dp_le_state.vc_rate_y[0]);
525 					DTN_INFO("\n");
526 				}
527 			}
528 
529 			DTN_INFO("\n");
530 		}
531 	}
532 
533 	DTN_INFO_END();
534 }
535 
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)536 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
537 {
538 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
539 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
540 
541 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
542 		tg->funcs->clear_optc_underflow(tg);
543 		return true;
544 	}
545 
546 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
547 		hubp->funcs->hubp_clear_underflow(hubp);
548 		return true;
549 	}
550 	return false;
551 }
552 
dcn10_enable_power_gating_plane(struct dce_hwseq * hws,bool enable)553 void dcn10_enable_power_gating_plane(
554 	struct dce_hwseq *hws,
555 	bool enable)
556 {
557 	bool force_on = true; /* disable power gating */
558 
559 	if (enable)
560 		force_on = false;
561 
562 	/* DCHUBP0/1/2/3 */
563 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
564 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
566 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
567 
568 	/* DPP0/1/2/3 */
569 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
570 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
572 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
573 }
574 
dcn10_disable_vga(struct dce_hwseq * hws)575 void dcn10_disable_vga(
576 	struct dce_hwseq *hws)
577 {
578 	unsigned int in_vga1_mode = 0;
579 	unsigned int in_vga2_mode = 0;
580 	unsigned int in_vga3_mode = 0;
581 	unsigned int in_vga4_mode = 0;
582 
583 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
584 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
585 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
586 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
587 
588 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
589 			in_vga3_mode == 0 && in_vga4_mode == 0)
590 		return;
591 
592 	REG_WRITE(D1VGA_CONTROL, 0);
593 	REG_WRITE(D2VGA_CONTROL, 0);
594 	REG_WRITE(D3VGA_CONTROL, 0);
595 	REG_WRITE(D4VGA_CONTROL, 0);
596 
597 	/* HW Engineer's Notes:
598 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
599 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
600 	 *
601 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
602 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
603 	 */
604 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
605 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
606 }
607 
608 /**
609  * dcn10_dpp_pg_control - DPP power gate control.
610  *
611  * @hws: dce_hwseq reference.
612  * @dpp_inst: DPP instance reference.
613  * @power_on: true if we want to enable power gate, false otherwise.
614  *
615  * Enable or disable power gate in the specific DPP instance.
616  */
dcn10_dpp_pg_control(struct dce_hwseq * hws,unsigned int dpp_inst,bool power_on)617 void dcn10_dpp_pg_control(
618 		struct dce_hwseq *hws,
619 		unsigned int dpp_inst,
620 		bool power_on)
621 {
622 	uint32_t power_gate = power_on ? 0 : 1;
623 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
624 
625 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
626 		return;
627 	if (REG(DOMAIN1_PG_CONFIG) == 0)
628 		return;
629 
630 	switch (dpp_inst) {
631 	case 0: /* DPP0 */
632 		REG_UPDATE(DOMAIN1_PG_CONFIG,
633 				DOMAIN1_POWER_GATE, power_gate);
634 
635 		REG_WAIT(DOMAIN1_PG_STATUS,
636 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
637 				1, 1000);
638 		break;
639 	case 1: /* DPP1 */
640 		REG_UPDATE(DOMAIN3_PG_CONFIG,
641 				DOMAIN3_POWER_GATE, power_gate);
642 
643 		REG_WAIT(DOMAIN3_PG_STATUS,
644 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
645 				1, 1000);
646 		break;
647 	case 2: /* DPP2 */
648 		REG_UPDATE(DOMAIN5_PG_CONFIG,
649 				DOMAIN5_POWER_GATE, power_gate);
650 
651 		REG_WAIT(DOMAIN5_PG_STATUS,
652 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
653 				1, 1000);
654 		break;
655 	case 3: /* DPP3 */
656 		REG_UPDATE(DOMAIN7_PG_CONFIG,
657 				DOMAIN7_POWER_GATE, power_gate);
658 
659 		REG_WAIT(DOMAIN7_PG_STATUS,
660 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
661 				1, 1000);
662 		break;
663 	default:
664 		BREAK_TO_DEBUGGER();
665 		break;
666 	}
667 }
668 
669 /**
670  * dcn10_hubp_pg_control - HUBP power gate control.
671  *
672  * @hws: dce_hwseq reference.
673  * @hubp_inst: DPP instance reference.
674  * @power_on: true if we want to enable power gate, false otherwise.
675  *
676  * Enable or disable power gate in the specific HUBP instance.
677  */
dcn10_hubp_pg_control(struct dce_hwseq * hws,unsigned int hubp_inst,bool power_on)678 void dcn10_hubp_pg_control(
679 		struct dce_hwseq *hws,
680 		unsigned int hubp_inst,
681 		bool power_on)
682 {
683 	uint32_t power_gate = power_on ? 0 : 1;
684 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
685 
686 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
687 		return;
688 	if (REG(DOMAIN0_PG_CONFIG) == 0)
689 		return;
690 
691 	switch (hubp_inst) {
692 	case 0: /* DCHUBP0 */
693 		REG_UPDATE(DOMAIN0_PG_CONFIG,
694 				DOMAIN0_POWER_GATE, power_gate);
695 
696 		REG_WAIT(DOMAIN0_PG_STATUS,
697 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
698 				1, 1000);
699 		break;
700 	case 1: /* DCHUBP1 */
701 		REG_UPDATE(DOMAIN2_PG_CONFIG,
702 				DOMAIN2_POWER_GATE, power_gate);
703 
704 		REG_WAIT(DOMAIN2_PG_STATUS,
705 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
706 				1, 1000);
707 		break;
708 	case 2: /* DCHUBP2 */
709 		REG_UPDATE(DOMAIN4_PG_CONFIG,
710 				DOMAIN4_POWER_GATE, power_gate);
711 
712 		REG_WAIT(DOMAIN4_PG_STATUS,
713 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
714 				1, 1000);
715 		break;
716 	case 3: /* DCHUBP3 */
717 		REG_UPDATE(DOMAIN6_PG_CONFIG,
718 				DOMAIN6_POWER_GATE, power_gate);
719 
720 		REG_WAIT(DOMAIN6_PG_STATUS,
721 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
722 				1, 1000);
723 		break;
724 	default:
725 		BREAK_TO_DEBUGGER();
726 		break;
727 	}
728 }
729 
power_on_plane_resources(struct dce_hwseq * hws,int plane_id)730 static void power_on_plane_resources(
731 	struct dce_hwseq *hws,
732 	int plane_id)
733 {
734 	DC_LOGGER_INIT(hws->ctx->logger);
735 
736 	if (hws->funcs.dpp_root_clock_control)
737 		hws->funcs.dpp_root_clock_control(hws, plane_id, true);
738 
739 	if (REG(DC_IP_REQUEST_CNTL)) {
740 		REG_SET(DC_IP_REQUEST_CNTL, 0,
741 				IP_REQUEST_EN, 1);
742 
743 		if (hws->funcs.dpp_pg_control)
744 			hws->funcs.dpp_pg_control(hws, plane_id, true);
745 
746 		if (hws->funcs.hubp_pg_control)
747 			hws->funcs.hubp_pg_control(hws, plane_id, true);
748 
749 		REG_SET(DC_IP_REQUEST_CNTL, 0,
750 				IP_REQUEST_EN, 0);
751 		DC_LOG_DEBUG(
752 				"Un-gated front end for pipe %d\n", plane_id);
753 	}
754 }
755 
undo_DEGVIDCN10_253_wa(struct dc * dc)756 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
757 {
758 	struct dce_hwseq *hws = dc->hwseq;
759 	struct hubp *hubp = dc->res_pool->hubps[0];
760 
761 	if (!hws->wa_state.DEGVIDCN10_253_applied)
762 		return;
763 
764 	hubp->funcs->set_blank(hubp, true);
765 
766 	REG_SET(DC_IP_REQUEST_CNTL, 0,
767 			IP_REQUEST_EN, 1);
768 
769 	hws->funcs.hubp_pg_control(hws, 0, false);
770 	REG_SET(DC_IP_REQUEST_CNTL, 0,
771 			IP_REQUEST_EN, 0);
772 
773 	hws->wa_state.DEGVIDCN10_253_applied = false;
774 }
775 
apply_DEGVIDCN10_253_wa(struct dc * dc)776 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
777 {
778 	struct dce_hwseq *hws = dc->hwseq;
779 	struct hubp *hubp = dc->res_pool->hubps[0];
780 	int i;
781 
782 	if (dc->debug.disable_stutter)
783 		return;
784 
785 	if (!hws->wa.DEGVIDCN10_253)
786 		return;
787 
788 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
789 		if (!dc->res_pool->hubps[i]->power_gated)
790 			return;
791 	}
792 
793 	/* all pipe power gated, apply work around to enable stutter. */
794 
795 	REG_SET(DC_IP_REQUEST_CNTL, 0,
796 			IP_REQUEST_EN, 1);
797 
798 	hws->funcs.hubp_pg_control(hws, 0, true);
799 	REG_SET(DC_IP_REQUEST_CNTL, 0,
800 			IP_REQUEST_EN, 0);
801 
802 	hubp->funcs->set_hubp_blank_en(hubp, false);
803 	hws->wa_state.DEGVIDCN10_253_applied = true;
804 }
805 
dcn10_bios_golden_init(struct dc * dc)806 void dcn10_bios_golden_init(struct dc *dc)
807 {
808 	struct dce_hwseq *hws = dc->hwseq;
809 	struct dc_bios *bp = dc->ctx->dc_bios;
810 	int i;
811 	bool allow_self_fresh_force_enable = true;
812 
813 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
814 		return;
815 
816 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
817 		allow_self_fresh_force_enable =
818 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
819 
820 
821 	/* WA for making DF sleep when idle after resume from S0i3.
822 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
823 	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
824 	 * before calling command table and it changed to 1 after,
825 	 * it should be set back to 0.
826 	 */
827 
828 	/* initialize dcn global */
829 	bp->funcs->enable_disp_power_gating(bp,
830 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
831 
832 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
833 		/* initialize dcn per pipe */
834 		bp->funcs->enable_disp_power_gating(bp,
835 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
836 	}
837 
838 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
839 		if (allow_self_fresh_force_enable == false &&
840 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
841 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
842 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
843 
844 }
845 
false_optc_underflow_wa(struct dc * dc,const struct dc_stream_state * stream,struct timing_generator * tg)846 static void false_optc_underflow_wa(
847 		struct dc *dc,
848 		const struct dc_stream_state *stream,
849 		struct timing_generator *tg)
850 {
851 	int i;
852 	bool underflow;
853 
854 	if (!dc->hwseq->wa.false_optc_underflow)
855 		return;
856 
857 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
858 
859 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
860 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
861 
862 		if (old_pipe_ctx->stream != stream)
863 			continue;
864 
865 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
866 	}
867 
868 	if (tg->funcs->set_blank_data_double_buffer)
869 		tg->funcs->set_blank_data_double_buffer(tg, true);
870 
871 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
872 		tg->funcs->clear_optc_underflow(tg);
873 }
874 
calculate_vready_offset_for_group(struct pipe_ctx * pipe)875 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
876 {
877 	struct pipe_ctx *other_pipe;
878 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
879 
880 	/* Always use the largest vready_offset of all connected pipes */
881 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
882 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
883 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
884 	}
885 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
886 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
887 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
888 	}
889 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
890 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
891 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
892 	}
893 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
894 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
895 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
896 	}
897 
898 	return vready_offset;
899 }
900 
dcn10_enable_stream_timing(struct pipe_ctx * pipe_ctx,struct dc_state * context,struct dc * dc)901 enum dc_status dcn10_enable_stream_timing(
902 		struct pipe_ctx *pipe_ctx,
903 		struct dc_state *context,
904 		struct dc *dc)
905 {
906 	struct dc_stream_state *stream = pipe_ctx->stream;
907 	enum dc_color_space color_space;
908 	struct tg_color black_color = {0};
909 
910 	/* by upper caller loop, pipe0 is parent pipe and be called first.
911 	 * back end is set up by for pipe0. Other children pipe share back end
912 	 * with pipe 0. No program is needed.
913 	 */
914 	if (pipe_ctx->top_pipe != NULL)
915 		return DC_OK;
916 
917 	/* TODO check if timing_changed, disable stream if timing changed */
918 
919 	/* HW program guide assume display already disable
920 	 * by unplug sequence. OTG assume stop.
921 	 */
922 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
923 
924 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
925 			pipe_ctx->clock_source,
926 			&pipe_ctx->stream_res.pix_clk_params,
927 			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
928 			&pipe_ctx->pll_settings)) {
929 		BREAK_TO_DEBUGGER();
930 		return DC_ERROR_UNEXPECTED;
931 	}
932 
933 	if (dc_is_hdmi_tmds_signal(stream->signal)) {
934 		stream->link->phy_state.symclk_ref_cnts.otg = 1;
935 		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
936 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
937 		else
938 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
939 	}
940 
941 	pipe_ctx->stream_res.tg->funcs->program_timing(
942 			pipe_ctx->stream_res.tg,
943 			&stream->timing,
944 			calculate_vready_offset_for_group(pipe_ctx),
945 			pipe_ctx->pipe_dlg_param.vstartup_start,
946 			pipe_ctx->pipe_dlg_param.vupdate_offset,
947 			pipe_ctx->pipe_dlg_param.vupdate_width,
948 			pipe_ctx->stream->signal,
949 			true);
950 
951 #if 0 /* move to after enable_crtc */
952 	/* TODO: OPP FMT, ABM. etc. should be done here. */
953 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
954 
955 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
956 
957 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
958 				pipe_ctx->stream_res.opp,
959 				&stream->bit_depth_params,
960 				&stream->clamping);
961 #endif
962 	/* program otg blank color */
963 	color_space = stream->output_color_space;
964 	color_space_to_black_color(dc, color_space, &black_color);
965 
966 	/*
967 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
968 	 * alternate between Cb and Cr, so both channels need the pixel
969 	 * value for Y
970 	 */
971 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
972 		black_color.color_r_cr = black_color.color_g_y;
973 
974 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
975 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
976 				pipe_ctx->stream_res.tg,
977 				&black_color);
978 
979 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
980 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
981 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
982 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
983 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
984 	}
985 
986 	/* VTG is  within DCHUB command block. DCFCLK is always on */
987 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
988 		BREAK_TO_DEBUGGER();
989 		return DC_ERROR_UNEXPECTED;
990 	}
991 
992 	/* TODO program crtc source select for non-virtual signal*/
993 	/* TODO program FMT */
994 	/* TODO setup link_enc */
995 	/* TODO set stream attributes */
996 	/* TODO program audio */
997 	/* TODO enable stream if timing changed */
998 	/* TODO unblank stream if DP */
999 
1000 	return DC_OK;
1001 }
1002 
dcn10_reset_back_end_for_pipe(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)1003 static void dcn10_reset_back_end_for_pipe(
1004 		struct dc *dc,
1005 		struct pipe_ctx *pipe_ctx,
1006 		struct dc_state *context)
1007 {
1008 	int i;
1009 	struct dc_link *link;
1010 	DC_LOGGER_INIT(dc->ctx->logger);
1011 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1012 		pipe_ctx->stream = NULL;
1013 		return;
1014 	}
1015 
1016 	link = pipe_ctx->stream->link;
1017 	/* DPMS may already disable or */
1018 	/* dpms_off status is incorrect due to fastboot
1019 	 * feature. When system resume from S4 with second
1020 	 * screen only, the dpms_off would be true but
1021 	 * VBIOS lit up eDP, so check link status too.
1022 	 */
1023 	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1024 		dc->link_srv->set_dpms_off(pipe_ctx);
1025 	else if (pipe_ctx->stream_res.audio)
1026 		dc->hwss.disable_audio_stream(pipe_ctx);
1027 
1028 	if (pipe_ctx->stream_res.audio) {
1029 		/*disable az_endpoint*/
1030 		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1031 
1032 		/*free audio*/
1033 		if (dc->caps.dynamic_audio == true) {
1034 			/*we have to dynamic arbitrate the audio endpoints*/
1035 			/*we free the resource, need reset is_audio_acquired*/
1036 			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1037 					pipe_ctx->stream_res.audio, false);
1038 			pipe_ctx->stream_res.audio = NULL;
1039 		}
1040 	}
1041 
1042 	/* by upper caller loop, parent pipe: pipe0, will be reset last.
1043 	 * back end share by all pipes and will be disable only when disable
1044 	 * parent pipe.
1045 	 */
1046 	if (pipe_ctx->top_pipe == NULL) {
1047 
1048 		if (pipe_ctx->stream_res.abm)
1049 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
1050 
1051 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1052 
1053 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1054 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
1055 			pipe_ctx->stream_res.tg->funcs->set_drr(
1056 					pipe_ctx->stream_res.tg, NULL);
1057 		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
1058 			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
1059 	}
1060 
1061 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1062 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1063 			break;
1064 
1065 	if (i == dc->res_pool->pipe_count)
1066 		return;
1067 
1068 	pipe_ctx->stream = NULL;
1069 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1070 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
1071 }
1072 
dcn10_hw_wa_force_recovery(struct dc * dc)1073 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1074 {
1075 	struct hubp *hubp ;
1076 	unsigned int i;
1077 	bool need_recover = true;
1078 
1079 	if (!dc->debug.recovery_enabled)
1080 		return false;
1081 
1082 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1083 		struct pipe_ctx *pipe_ctx =
1084 			&dc->current_state->res_ctx.pipe_ctx[i];
1085 		if (pipe_ctx != NULL) {
1086 			hubp = pipe_ctx->plane_res.hubp;
1087 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1088 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1089 					/* one pipe underflow, we will reset all the pipes*/
1090 					need_recover = true;
1091 				}
1092 			}
1093 		}
1094 	}
1095 	if (!need_recover)
1096 		return false;
1097 	/*
1098 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1099 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1100 	DCHUBP_CNTL:HUBP_DISABLE=1
1101 	DCHUBP_CNTL:HUBP_DISABLE=0
1102 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1103 	DCSURF_PRIMARY_SURFACE_ADDRESS
1104 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1105 	*/
1106 
1107 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1108 		struct pipe_ctx *pipe_ctx =
1109 			&dc->current_state->res_ctx.pipe_ctx[i];
1110 		if (pipe_ctx != NULL) {
1111 			hubp = pipe_ctx->plane_res.hubp;
1112 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1113 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1114 				hubp->funcs->set_hubp_blank_en(hubp, true);
1115 		}
1116 	}
1117 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1118 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1119 
1120 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1121 		struct pipe_ctx *pipe_ctx =
1122 			&dc->current_state->res_ctx.pipe_ctx[i];
1123 		if (pipe_ctx != NULL) {
1124 			hubp = pipe_ctx->plane_res.hubp;
1125 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1126 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1127 				hubp->funcs->hubp_disable_control(hubp, true);
1128 		}
1129 	}
1130 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1131 		struct pipe_ctx *pipe_ctx =
1132 			&dc->current_state->res_ctx.pipe_ctx[i];
1133 		if (pipe_ctx != NULL) {
1134 			hubp = pipe_ctx->plane_res.hubp;
1135 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1136 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1137 				hubp->funcs->hubp_disable_control(hubp, true);
1138 		}
1139 	}
1140 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1141 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1142 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1143 		struct pipe_ctx *pipe_ctx =
1144 			&dc->current_state->res_ctx.pipe_ctx[i];
1145 		if (pipe_ctx != NULL) {
1146 			hubp = pipe_ctx->plane_res.hubp;
1147 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1148 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1149 				hubp->funcs->set_hubp_blank_en(hubp, true);
1150 		}
1151 	}
1152 	return true;
1153 
1154 }
1155 
dcn10_verify_allow_pstate_change_high(struct dc * dc)1156 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1157 {
1158 	struct hubbub *hubbub = dc->res_pool->hubbub;
1159 	static bool should_log_hw_state; /* prevent hw state log by default */
1160 
1161 	if (!hubbub->funcs->verify_allow_pstate_change_high)
1162 		return;
1163 
1164 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1165 		int i = 0;
1166 
1167 		if (should_log_hw_state)
1168 			dcn10_log_hw_state(dc, NULL);
1169 
1170 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1171 		BREAK_TO_DEBUGGER();
1172 		if (dcn10_hw_wa_force_recovery(dc)) {
1173 			/*check again*/
1174 			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1175 				BREAK_TO_DEBUGGER();
1176 		}
1177 	}
1178 }
1179 
1180 /* trigger HW to start disconnect plane from stream on the next vsync */
dcn10_plane_atomic_disconnect(struct dc * dc,struct pipe_ctx * pipe_ctx)1181 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1182 {
1183 	struct dce_hwseq *hws = dc->hwseq;
1184 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1185 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1186 	struct mpc *mpc = dc->res_pool->mpc;
1187 	struct mpc_tree *mpc_tree_params;
1188 	struct mpcc *mpcc_to_remove = NULL;
1189 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1190 
1191 	mpc_tree_params = &(opp->mpc_tree_params);
1192 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1193 
1194 	/*Already reset*/
1195 	if (mpcc_to_remove == NULL)
1196 		return;
1197 
1198 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1199 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1200 	// so don't wait for MPCC_IDLE in the programming sequence
1201 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1202 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1203 
1204 	dc->optimized_required = true;
1205 
1206 	if (hubp->funcs->hubp_disconnect)
1207 		hubp->funcs->hubp_disconnect(hubp);
1208 
1209 	if (dc->debug.sanity_checks)
1210 		hws->funcs.verify_allow_pstate_change_high(dc);
1211 }
1212 
1213 /**
1214  * dcn10_plane_atomic_power_down - Power down plane components.
1215  *
1216  * @dc: dc struct reference. used for grab hwseq.
1217  * @dpp: dpp struct reference.
1218  * @hubp: hubp struct reference.
1219  *
1220  * Keep in mind that this operation requires a power gate configuration;
1221  * however, requests for switch power gate are precisely controlled to avoid
1222  * problems. For this reason, power gate request is usually disabled. This
1223  * function first needs to enable the power gate request before disabling DPP
1224  * and HUBP. Finally, it disables the power gate request again.
1225  */
dcn10_plane_atomic_power_down(struct dc * dc,struct dpp * dpp,struct hubp * hubp)1226 void dcn10_plane_atomic_power_down(struct dc *dc,
1227 		struct dpp *dpp,
1228 		struct hubp *hubp)
1229 {
1230 	struct dce_hwseq *hws = dc->hwseq;
1231 	DC_LOGGER_INIT(dc->ctx->logger);
1232 
1233 	if (REG(DC_IP_REQUEST_CNTL)) {
1234 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1235 				IP_REQUEST_EN, 1);
1236 
1237 		if (hws->funcs.dpp_pg_control)
1238 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1239 
1240 		if (hws->funcs.hubp_pg_control)
1241 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1242 
1243 		dpp->funcs->dpp_reset(dpp);
1244 
1245 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1246 				IP_REQUEST_EN, 0);
1247 		DC_LOG_DEBUG(
1248 				"Power gated front end %d\n", hubp->inst);
1249 	}
1250 
1251 	if (hws->funcs.dpp_root_clock_control)
1252 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
1253 }
1254 
1255 /* disable HW used by plane.
1256  * note:  cannot disable until disconnect is complete
1257  */
dcn10_plane_atomic_disable(struct dc * dc,struct pipe_ctx * pipe_ctx)1258 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1259 {
1260 	struct dce_hwseq *hws = dc->hwseq;
1261 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1262 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1263 	int opp_id = hubp->opp_id;
1264 
1265 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1266 
1267 	hubp->funcs->hubp_clk_cntl(hubp, false);
1268 
1269 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1270 
1271 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1272 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1273 				pipe_ctx->stream_res.opp,
1274 				false);
1275 
1276 	hubp->power_gated = true;
1277 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1278 
1279 	hws->funcs.plane_atomic_power_down(dc,
1280 			pipe_ctx->plane_res.dpp,
1281 			pipe_ctx->plane_res.hubp);
1282 
1283 	pipe_ctx->stream = NULL;
1284 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1285 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1286 	pipe_ctx->top_pipe = NULL;
1287 	pipe_ctx->bottom_pipe = NULL;
1288 	pipe_ctx->plane_state = NULL;
1289 }
1290 
dcn10_disable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx)1291 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1292 {
1293 	struct dce_hwseq *hws = dc->hwseq;
1294 	DC_LOGGER_INIT(dc->ctx->logger);
1295 
1296 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1297 		return;
1298 
1299 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1300 
1301 	apply_DEGVIDCN10_253_wa(dc);
1302 
1303 	DC_LOG_DC("Power down front end %d\n",
1304 					pipe_ctx->pipe_idx);
1305 }
1306 
dcn10_init_pipes(struct dc * dc,struct dc_state * context)1307 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1308 {
1309 	int i;
1310 	struct dce_hwseq *hws = dc->hwseq;
1311 	struct hubbub *hubbub = dc->res_pool->hubbub;
1312 	bool can_apply_seamless_boot = false;
1313 
1314 	for (i = 0; i < context->stream_count; i++) {
1315 		if (context->streams[i]->apply_seamless_boot_optimization) {
1316 			can_apply_seamless_boot = true;
1317 			break;
1318 		}
1319 	}
1320 
1321 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1322 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1323 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1324 
1325 		/* There is assumption that pipe_ctx is not mapping irregularly
1326 		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1327 		 * we will use the pipe, so don't disable
1328 		 */
1329 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1330 			continue;
1331 
1332 		/* Blank controller using driver code instead of
1333 		 * command table.
1334 		 */
1335 		if (tg->funcs->is_tg_enabled(tg)) {
1336 			if (hws->funcs.init_blank != NULL) {
1337 				hws->funcs.init_blank(dc, tg);
1338 				tg->funcs->lock(tg);
1339 			} else {
1340 				tg->funcs->lock(tg);
1341 				tg->funcs->set_blank(tg, true);
1342 				hwss_wait_for_blank_complete(tg);
1343 			}
1344 		}
1345 	}
1346 
1347 	/* Reset det size */
1348 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1349 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1350 		struct hubp *hubp = dc->res_pool->hubps[i];
1351 
1352 		/* Do not need to reset for seamless boot */
1353 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1354 			continue;
1355 
1356 		if (hubbub && hubp) {
1357 			if (hubbub->funcs->program_det_size)
1358 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1359 		}
1360 	}
1361 
1362 	/* num_opp will be equal to number of mpcc */
1363 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1364 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1365 
1366 		/* Cannot reset the MPC mux if seamless boot */
1367 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1368 			continue;
1369 
1370 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1371 				dc->res_pool->mpc, i);
1372 	}
1373 
1374 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1375 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1376 		struct hubp *hubp = dc->res_pool->hubps[i];
1377 		struct dpp *dpp = dc->res_pool->dpps[i];
1378 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1379 
1380 		/* There is assumption that pipe_ctx is not mapping irregularly
1381 		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
1382 		 * we will use the pipe, so don't disable
1383 		 */
1384 		if (can_apply_seamless_boot &&
1385 			pipe_ctx->stream != NULL &&
1386 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1387 				pipe_ctx->stream_res.tg)) {
1388 			// Enable double buffering for OTG_BLANK no matter if
1389 			// seamless boot is enabled or not to suppress global sync
1390 			// signals when OTG blanked. This is to prevent pipe from
1391 			// requesting data while in PSR.
1392 			tg->funcs->tg_init(tg);
1393 			hubp->power_gated = true;
1394 			continue;
1395 		}
1396 
1397 		/* Disable on the current state so the new one isn't cleared. */
1398 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1399 
1400 		dpp->funcs->dpp_reset(dpp);
1401 
1402 		pipe_ctx->stream_res.tg = tg;
1403 		pipe_ctx->pipe_idx = i;
1404 
1405 		pipe_ctx->plane_res.hubp = hubp;
1406 		pipe_ctx->plane_res.dpp = dpp;
1407 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1408 		hubp->mpcc_id = dpp->inst;
1409 		hubp->opp_id = OPP_ID_INVALID;
1410 		hubp->power_gated = false;
1411 
1412 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1413 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1414 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1415 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1416 
1417 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1418 
1419 		if (tg->funcs->is_tg_enabled(tg))
1420 			tg->funcs->unlock(tg);
1421 
1422 		dc->hwss.disable_plane(dc, pipe_ctx);
1423 
1424 		pipe_ctx->stream_res.tg = NULL;
1425 		pipe_ctx->plane_res.hubp = NULL;
1426 
1427 		if (tg->funcs->is_tg_enabled(tg)) {
1428 			if (tg->funcs->init_odm)
1429 				tg->funcs->init_odm(tg);
1430 		}
1431 
1432 		tg->funcs->tg_init(tg);
1433 	}
1434 
1435 	/* Power gate DSCs */
1436 	if (hws->funcs.dsc_pg_control != NULL) {
1437 		uint32_t num_opps = 0;
1438 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1439 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1440 
1441 		// Step 1: To find out which OPTC is running & OPTC DSC is ON
1442 		// We can't use res_pool->res_cap->num_timing_generator to check
1443 		// Because it records display pipes default setting built in driver,
1444 		// not display pipes of the current chip.
1445 		// Some ASICs would be fused display pipes less than the default setting.
1446 		// In dcnxx_resource_construct function, driver would obatin real information.
1447 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1448 			uint32_t optc_dsc_state = 0;
1449 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1450 
1451 			if (tg->funcs->is_tg_enabled(tg)) {
1452 				if (tg->funcs->get_dsc_status)
1453 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
1454 				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
1455 				// non-zero value is DSC enabled
1456 				if (optc_dsc_state != 0) {
1457 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1458 					break;
1459 				}
1460 			}
1461 		}
1462 
1463 		// Step 2: To power down DSC but skip DSC  of running OPTC
1464 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1465 			struct dcn_dsc_state s  = {0};
1466 
1467 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1468 
1469 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1470 				s.dsc_clock_en && s.dsc_fw_en)
1471 				continue;
1472 
1473 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1474 		}
1475 	}
1476 }
1477 
dcn10_init_hw(struct dc * dc)1478 void dcn10_init_hw(struct dc *dc)
1479 {
1480 	int i;
1481 	struct abm *abm = dc->res_pool->abm;
1482 	struct dmcu *dmcu = dc->res_pool->dmcu;
1483 	struct dce_hwseq *hws = dc->hwseq;
1484 	struct dc_bios *dcb = dc->ctx->dc_bios;
1485 	struct resource_pool *res_pool = dc->res_pool;
1486 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1487 	bool   is_optimized_init_done = false;
1488 
1489 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1490 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1491 
1492 	/* Align bw context with hw config when system resume. */
1493 	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1494 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1495 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1496 	}
1497 
1498 	// Initialize the dccg
1499 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1500 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1501 
1502 	if (!dcb->funcs->is_accelerated_mode(dcb))
1503 		hws->funcs.disable_vga(dc->hwseq);
1504 
1505 	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
1506 		hws->funcs.bios_golden_init(dc);
1507 
1508 
1509 	if (dc->ctx->dc_bios->fw_info_valid) {
1510 		res_pool->ref_clocks.xtalin_clock_inKhz =
1511 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1512 
1513 		if (res_pool->dccg && res_pool->hubbub) {
1514 
1515 			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1516 					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1517 					&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1518 
1519 			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1520 					res_pool->ref_clocks.dccg_ref_clock_inKhz,
1521 					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1522 		} else {
1523 			// Not all ASICs have DCCG sw component
1524 			res_pool->ref_clocks.dccg_ref_clock_inKhz =
1525 					res_pool->ref_clocks.xtalin_clock_inKhz;
1526 			res_pool->ref_clocks.dchub_ref_clock_inKhz =
1527 					res_pool->ref_clocks.xtalin_clock_inKhz;
1528 		}
1529 	} else
1530 		ASSERT_CRITICAL(false);
1531 
1532 	for (i = 0; i < dc->link_count; i++) {
1533 		/* Power up AND update implementation according to the
1534 		 * required signal (which may be different from the
1535 		 * default signal on connector).
1536 		 */
1537 		struct dc_link *link = dc->links[i];
1538 
1539 		if (!is_optimized_init_done)
1540 			link->link_enc->funcs->hw_init(link->link_enc);
1541 
1542 		/* Check for enabled DIG to identify enabled display */
1543 		if (link->link_enc->funcs->is_dig_enabled &&
1544 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1545 			link->link_status.link_active = true;
1546 			if (link->link_enc->funcs->fec_is_active &&
1547 					link->link_enc->funcs->fec_is_active(link->link_enc))
1548 				link->fec_state = dc_link_fec_enabled;
1549 		}
1550 	}
1551 
1552 	/* we want to turn off all dp displays before doing detection */
1553 	dc->link_srv->blank_all_dp_displays(dc);
1554 
1555 	if (hws->funcs.enable_power_gating_plane)
1556 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1557 
1558 	/* If taking over control from VBIOS, we may want to optimize our first
1559 	 * mode set, so we need to skip powering down pipes until we know which
1560 	 * pipes we want to use.
1561 	 * Otherwise, if taking control is not possible, we need to power
1562 	 * everything down.
1563 	 */
1564 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1565 		if (!is_optimized_init_done) {
1566 			hws->funcs.init_pipes(dc, dc->current_state);
1567 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1568 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1569 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1570 		}
1571 	}
1572 
1573 	if (!is_optimized_init_done) {
1574 
1575 		for (i = 0; i < res_pool->audio_count; i++) {
1576 			struct audio *audio = res_pool->audios[i];
1577 
1578 			audio->funcs->hw_init(audio);
1579 		}
1580 
1581 		for (i = 0; i < dc->link_count; i++) {
1582 			struct dc_link *link = dc->links[i];
1583 
1584 			if (link->panel_cntl)
1585 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1586 		}
1587 
1588 		if (abm != NULL)
1589 			abm->funcs->abm_init(abm, backlight);
1590 
1591 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1592 			dmcu->funcs->dmcu_init(dmcu);
1593 	}
1594 
1595 	if (abm != NULL && dmcu != NULL)
1596 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1597 
1598 	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1599 	if (!is_optimized_init_done)
1600 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1601 
1602 	if (!dc->debug.disable_clock_gate) {
1603 		/* enable all DCN clock gating */
1604 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1605 
1606 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1607 
1608 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1609 	}
1610 
1611 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1612 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1613 }
1614 
1615 /* In headless boot cases, DIG may be turned on,
1616  * which causes HW/SW discrepancies.
1617  * To avoid this, power down the hardware on boot
1618  * if DIG is turned on.
1619  */
dcn10_power_down_on_boot(struct dc * dc)1620 void dcn10_power_down_on_boot(struct dc *dc)
1621 {
1622 	struct dc_link *edp_links[MAX_NUM_EDP];
1623 	struct dc_link *edp_link = NULL;
1624 	int edp_num;
1625 	int i = 0;
1626 
1627 	dc_get_edp_links(dc, edp_links, &edp_num);
1628 	if (edp_num)
1629 		edp_link = edp_links[0];
1630 
1631 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1632 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1633 			dc->hwseq->funcs.edp_backlight_control &&
1634 			dc->hwss.power_down &&
1635 			dc->hwss.edp_power_control) {
1636 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1637 		dc->hwss.power_down(dc);
1638 		dc->hwss.edp_power_control(edp_link, false);
1639 	} else {
1640 		for (i = 0; i < dc->link_count; i++) {
1641 			struct dc_link *link = dc->links[i];
1642 
1643 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1644 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1645 					dc->hwss.power_down) {
1646 				dc->hwss.power_down(dc);
1647 				break;
1648 			}
1649 
1650 		}
1651 	}
1652 
1653 	/*
1654 	 * Call update_clocks with empty context
1655 	 * to send DISPLAY_OFF
1656 	 * Otherwise DISPLAY_OFF may not be asserted
1657 	 */
1658 	if (dc->clk_mgr->funcs->set_low_power_state)
1659 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1660 }
1661 
dcn10_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1662 void dcn10_reset_hw_ctx_wrap(
1663 		struct dc *dc,
1664 		struct dc_state *context)
1665 {
1666 	int i;
1667 	struct dce_hwseq *hws = dc->hwseq;
1668 
1669 	/* Reset Back End*/
1670 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1671 		struct pipe_ctx *pipe_ctx_old =
1672 			&dc->current_state->res_ctx.pipe_ctx[i];
1673 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1674 
1675 		if (!pipe_ctx_old->stream)
1676 			continue;
1677 
1678 		if (pipe_ctx_old->top_pipe)
1679 			continue;
1680 
1681 		if (!pipe_ctx->stream ||
1682 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1683 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1684 
1685 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1686 			if (hws->funcs.enable_stream_gating)
1687 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1688 			if (old_clk)
1689 				old_clk->funcs->cs_power_down(old_clk);
1690 		}
1691 	}
1692 }
1693 
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1694 static bool patch_address_for_sbs_tb_stereo(
1695 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1696 {
1697 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1698 	bool sec_split = pipe_ctx->top_pipe &&
1699 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1700 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1701 		(pipe_ctx->stream->timing.timing_3d_format ==
1702 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1703 		 pipe_ctx->stream->timing.timing_3d_format ==
1704 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1705 		*addr = plane_state->address.grph_stereo.left_addr;
1706 		plane_state->address.grph_stereo.left_addr =
1707 		plane_state->address.grph_stereo.right_addr;
1708 		return true;
1709 	} else {
1710 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1711 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1712 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1713 			plane_state->address.grph_stereo.right_addr =
1714 			plane_state->address.grph_stereo.left_addr;
1715 			plane_state->address.grph_stereo.right_meta_addr =
1716 			plane_state->address.grph_stereo.left_meta_addr;
1717 		}
1718 	}
1719 	return false;
1720 }
1721 
dcn10_update_plane_addr(const struct dc * dc,struct pipe_ctx * pipe_ctx)1722 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1723 {
1724 	bool addr_patched = false;
1725 	PHYSICAL_ADDRESS_LOC addr;
1726 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1727 
1728 	if (plane_state == NULL)
1729 		return;
1730 
1731 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1732 
1733 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1734 			pipe_ctx->plane_res.hubp,
1735 			&plane_state->address,
1736 			plane_state->flip_immediate);
1737 
1738 	plane_state->status.requested_address = plane_state->address;
1739 
1740 	if (plane_state->flip_immediate)
1741 		plane_state->status.current_address = plane_state->address;
1742 
1743 	if (addr_patched)
1744 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1745 }
1746 
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1747 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1748 			const struct dc_plane_state *plane_state)
1749 {
1750 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1751 	const struct dc_transfer_func *tf = NULL;
1752 	bool result = true;
1753 
1754 	if (dpp_base == NULL)
1755 		return false;
1756 
1757 	if (plane_state->in_transfer_func)
1758 		tf = plane_state->in_transfer_func;
1759 
1760 	if (plane_state->gamma_correction &&
1761 		!dpp_base->ctx->dc->debug.always_use_regamma
1762 		&& !plane_state->gamma_correction->is_identity
1763 			&& dce_use_lut(plane_state->format))
1764 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1765 
1766 	if (tf == NULL)
1767 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1768 	else if (tf->type == TF_TYPE_PREDEFINED) {
1769 		switch (tf->tf) {
1770 		case TRANSFER_FUNCTION_SRGB:
1771 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1772 			break;
1773 		case TRANSFER_FUNCTION_BT709:
1774 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1775 			break;
1776 		case TRANSFER_FUNCTION_LINEAR:
1777 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1778 			break;
1779 		case TRANSFER_FUNCTION_PQ:
1780 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1781 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1782 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1783 			result = true;
1784 			break;
1785 		default:
1786 			result = false;
1787 			break;
1788 		}
1789 	} else if (tf->type == TF_TYPE_BYPASS) {
1790 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1791 	} else {
1792 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1793 					&dpp_base->degamma_params);
1794 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1795 				&dpp_base->degamma_params);
1796 		result = true;
1797 	}
1798 
1799 	return result;
1800 }
1801 
1802 #define MAX_NUM_HW_POINTS 0x200
1803 
log_tf(struct dc_context * ctx,struct dc_transfer_func * tf,uint32_t hw_points_num)1804 static void log_tf(struct dc_context *ctx,
1805 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1806 {
1807 	// DC_LOG_GAMMA is default logging of all hw points
1808 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1809 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1810 	int i = 0;
1811 
1812 	DC_LOGGER_INIT(ctx->logger);
1813 	DC_LOG_GAMMA("Gamma Correction TF");
1814 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1815 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1816 
1817 	for (i = 0; i < hw_points_num; i++) {
1818 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1819 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1820 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1821 	}
1822 
1823 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1824 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1825 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1826 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1827 	}
1828 }
1829 
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1830 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1831 				const struct dc_stream_state *stream)
1832 {
1833 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1834 
1835 	if (!stream)
1836 		return false;
1837 
1838 	if (dpp == NULL)
1839 		return false;
1840 
1841 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1842 
1843 	if (stream->out_transfer_func &&
1844 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1845 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1846 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1847 
1848 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1849 	 * full update.
1850 	 */
1851 	else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1852 			stream->out_transfer_func,
1853 			&dpp->regamma_params, false)) {
1854 		dpp->funcs->dpp_program_regamma_pwl(
1855 				dpp,
1856 				&dpp->regamma_params, OPP_REGAMMA_USER);
1857 	} else
1858 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1859 
1860 	if (stream->ctx &&
1861 	    stream->out_transfer_func) {
1862 		log_tf(stream->ctx,
1863 				stream->out_transfer_func,
1864 				dpp->regamma_params.hw_points_num);
1865 	}
1866 
1867 	return true;
1868 }
1869 
dcn10_pipe_control_lock(struct dc * dc,struct pipe_ctx * pipe,bool lock)1870 void dcn10_pipe_control_lock(
1871 	struct dc *dc,
1872 	struct pipe_ctx *pipe,
1873 	bool lock)
1874 {
1875 	struct dce_hwseq *hws = dc->hwseq;
1876 
1877 	/* Use the TG master update lock to lock everything on the TG;
1878 	 * therefore only the top pipe needs to lock.
1879 	 */
1880 	if (!pipe || pipe->top_pipe)
1881 		return;
1882 
1883 	if (dc->debug.sanity_checks)
1884 		hws->funcs.verify_allow_pstate_change_high(dc);
1885 
1886 	if (lock)
1887 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1888 	else
1889 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1890 
1891 	if (dc->debug.sanity_checks)
1892 		hws->funcs.verify_allow_pstate_change_high(dc);
1893 }
1894 
1895 /**
1896  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1897  *
1898  * Software keepout workaround to prevent cursor update locking from stalling
1899  * out cursor updates indefinitely, or old values from being retained in
1900  * the case where the viewport changes in the same frame as the cursor.
1901  *
1902  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1903  * too close to VUPDATE, then stall out until VUPDATE finishes.
1904  *
1905  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1906  *       to avoid the need for this workaround.
1907  *
1908  * @dc: Current DC state
1909  * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1910  *
1911  * Return: void
1912  */
delay_cursor_until_vupdate(struct dc * dc,struct pipe_ctx * pipe_ctx)1913 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1914 {
1915 	struct dc_stream_state *stream = pipe_ctx->stream;
1916 	struct crtc_position position;
1917 	uint32_t vupdate_start, vupdate_end;
1918 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1919 	unsigned int us_per_line, us_vupdate;
1920 
1921 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1922 		return;
1923 
1924 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1925 		return;
1926 
1927 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1928 				       &vupdate_end);
1929 
1930 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1931 	vpos = position.vertical_count;
1932 
1933 	if (vpos <= vupdate_start) {
1934 		/* VPOS is in VACTIVE or back porch. */
1935 		lines_to_vupdate = vupdate_start - vpos;
1936 	} else {
1937 		lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
1938 	}
1939 
1940 	/* Calculate time until VUPDATE in microseconds. */
1941 	us_per_line =
1942 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1943 	us_to_vupdate = lines_to_vupdate * us_per_line;
1944 
1945 	/* Stall out until the cursor update completes. */
1946 	if (vupdate_end < vupdate_start)
1947 		vupdate_end += stream->timing.v_total;
1948 
1949 	/* Position is within the vupdate start/end window. */
1950 	if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
1951 		us_to_vupdate = 0;
1952 
1953 	/* 70 us is a conservative estimate of cursor update time. */
1954 	if (us_to_vupdate > 70)
1955 		return;
1956 
1957 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1958 	udelay(us_to_vupdate + us_vupdate);
1959 }
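
/*
 * For illustration, a worked pass through the math above with a
 * hypothetical 1080p60-style timing (example values only, not taken from
 * any particular panel):
 *
 *   h_total = 2200, pix_clk_100hz = 1485000 (148.5 MHz)
 *   us_per_line      = 2200 * 10000 / 1485000 = 14 us (integer math)
 *   lines_to_vupdate = 3  ->  us_to_vupdate   = 3 * 14 = 42 us
 *
 * Since 42 us <= 70 us, the function stalls for us_to_vupdate plus the
 * length of the VUPDATE window before allowing the cursor lock to proceed.
 */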
1960 
dcn10_cursor_lock(struct dc * dc,struct pipe_ctx * pipe,bool lock)1961 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1962 {
1963 	/* Cursor lock is per MPCC tree, so we only need to lock one pipe per stream. */
1964 	if (!pipe || pipe->top_pipe)
1965 		return;
1966 
1967 	/* Prevent cursor lock from stalling out cursor updates. */
1968 	if (lock)
1969 		delay_cursor_until_vupdate(dc, pipe);
1970 
1971 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1972 		union dmub_hw_lock_flags hw_locks = { 0 };
1973 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1974 
1975 		hw_locks.bits.lock_cursor = 1;
1976 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1977 
1978 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1979 					lock,
1980 					&hw_locks,
1981 					&inst_flags);
1982 	} else
1983 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1984 				pipe->stream_res.opp->inst, lock);
1985 }
1986 
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)1987 static bool wait_for_reset_trigger_to_occur(
1988 	struct dc_context *dc_ctx,
1989 	struct timing_generator *tg)
1990 {
1991 	bool rc = false;
1992 
1993 	/* To avoid an endless loop, we wait at most
1994 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1995 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1996 	int i;
1997 
1998 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1999 
2000 		if (!tg->funcs->is_counter_moving(tg)) {
2001 			DC_ERROR("TG counter is not moving!\n");
2002 			break;
2003 		}
2004 
2005 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2006 			rc = true;
2007 			/* usually occurs at i=1 */
2008 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2009 					i);
2010 			break;
2011 		}
2012 
2013 		/* Wait for one frame. */
2014 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2015 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2016 	}
2017 
2018 	if (!rc)
2019 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2020 
2021 	return rc;
2022 }
2023 
reduceSizeAndFraction(uint64_t * numerator,uint64_t * denominator,bool checkUint32Bounary)2024 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2025 				      uint64_t *denominator,
2026 				      bool checkUint32Bounary)
2027 {
2028 	int i;
2029 	bool ret = checkUint32Bounary == false;
2030 	uint64_t max_int32 = 0xffffffff;
2031 	uint64_t num, denom;
2032 	static const uint16_t prime_numbers[] = {
2033 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2034 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2035 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2036 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2037 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2038 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2039 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2040 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2041 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2042 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2043 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2044 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2045 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2046 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2047 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2048 	int count = ARRAY_SIZE(prime_numbers);
2049 
2050 	num = *numerator;
2051 	denom = *denominator;
2052 	for (i = 0; i < count; i++) {
2053 		uint32_t num_remainder, denom_remainder;
2054 		uint64_t num_result, denom_result;
2055 		if (checkUint32Bounary &&
2056 			num <= max_int32 && denom <= max_int32) {
2057 			ret = true;
2058 			break;
2059 		}
2060 		do {
2061 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2062 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2063 			if (num_remainder == 0 && denom_remainder == 0) {
2064 				num = num_result;
2065 				denom = denom_result;
2066 			}
2067 		} while (num_remainder == 0 && denom_remainder == 0);
2068 	}
2069 	*numerator = num;
2070 	*denominator = denom;
2071 	return ret;
2072 }
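
/*
 * A small illustrative trace of the reduction above, with made-up values.
 * Starting from *numerator = 600 and *denominator = 840, shared prime
 * factors are divided out in order:
 *
 *   600/840 -> 300/420 -> 150/210 -> 75/105   (factor 2, three times)
 *   75/105  -> 25/35                          (factor 3)
 *   25/35   -> 5/7                            (factor 5)
 *
 * With checkUint32Bounary == true the loop may stop early, as soon as both
 * values fit in 32 bits, which is all the DTO programming below needs.
 */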
2073 
is_low_refresh_rate(struct pipe_ctx * pipe)2074 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2075 {
2076 	uint32_t master_pipe_refresh_rate =
2077 		pipe->stream->timing.pix_clk_100hz * 100 /
2078 		pipe->stream->timing.h_total /
2079 		pipe->stream->timing.v_total;
2080 	return master_pipe_refresh_rate <= 30;
2081 }
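
/*
 * For example, a hypothetical 148.5 MHz, 2200 x 1125 timing gives
 * 1485000 * 100 / 2200 / 1125 = 60 Hz, so such a stream would not be
 * treated as low refresh rate (the threshold is <= 30 Hz).
 */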
2082 
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2083 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2084 				 bool account_low_refresh_rate)
2085 {
2086 	uint32_t clock_divider = 1;
2087 	uint32_t numpipes = 1;
2088 
2089 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2090 		clock_divider *= 2;
2091 
2092 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2093 		clock_divider *= 2;
2094 
2095 	while (pipe->next_odm_pipe) {
2096 		pipe = pipe->next_odm_pipe;
2097 		numpipes++;
2098 	}
2099 	clock_divider *= numpipes;
2100 
2101 	return clock_divider;
2102 }
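
/*
 * An illustrative divider calculation for a hypothetical pipe configuration:
 * a YCbCr 4:2:0 stream at <= 30 Hz split across two ODM pipes, with
 * account_low_refresh_rate == true, gives
 *
 *   divider = 1 * 2 (low refresh) * 2 (4:2:0) * 2 (two ODM pipes) = 8
 *
 * The result is used in dcn10_align_pixel_clocks() both to scale down the
 * DTO phase and to scale the measured per-pipe clock back up to the full
 * timing's pixel rate.
 */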
2103 
dcn10_align_pixel_clocks(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2104 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2105 				    struct pipe_ctx *grouped_pipes[])
2106 {
2107 	struct dc_context *dc_ctx = dc->ctx;
2108 	int i, master = -1, embedded = -1;
2109 	struct dc_crtc_timing *hw_crtc_timing;
2110 	uint64_t phase[MAX_PIPES];
2111 	uint64_t modulo[MAX_PIPES];
2112 	unsigned int pclk;
2113 
2114 	uint32_t embedded_pix_clk_100hz;
2115 	uint16_t embedded_h_total;
2116 	uint16_t embedded_v_total;
2117 	uint32_t dp_ref_clk_100hz =
2118 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2119 
2120 	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2121 	if (!hw_crtc_timing)
2122 		return master;
2123 
2124 	if (dc->config.vblank_alignment_dto_params &&
2125 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2126 		embedded_h_total =
2127 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2128 		embedded_v_total =
2129 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2130 		embedded_pix_clk_100hz =
2131 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2132 
2133 		for (i = 0; i < group_size; i++) {
2134 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2135 					grouped_pipes[i]->stream_res.tg,
2136 					&hw_crtc_timing[i]);
2137 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2138 				dc->res_pool->dp_clock_source,
2139 				grouped_pipes[i]->stream_res.tg->inst,
2140 				&pclk);
2141 			hw_crtc_timing[i].pix_clk_100hz = pclk;
2142 			if (dc_is_embedded_signal(
2143 					grouped_pipes[i]->stream->signal)) {
2144 				embedded = i;
2145 				master = i;
2146 				phase[i] = embedded_pix_clk_100hz*100;
2147 				modulo[i] = dp_ref_clk_100hz*100;
2148 			} else {
2149 
2150 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
2151 					hw_crtc_timing[i].h_total*
2152 					hw_crtc_timing[i].v_total;
2153 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2154 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
2155 					embedded_h_total*
2156 					embedded_v_total;
2157 
2158 				if (reduceSizeAndFraction(&phase[i],
2159 						&modulo[i], true) == false) {
2160 					/*
2161 					 * This stops this timing from being
2162 					 * reported as synchronizable.
2163 					 */
2164 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2165 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2166 				}
2167 			}
2168 		}
2169 
2170 		for (i = 0; i < group_size; i++) {
2171 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2172 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2173 					dc->res_pool->dp_clock_source,
2174 					grouped_pipes[i]->stream_res.tg->inst,
2175 					phase[i], modulo[i]);
2176 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2177 					dc->res_pool->dp_clock_source,
2178 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2179 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2180 					pclk*get_clock_divider(grouped_pipes[i], false);
2181 				if (master == -1)
2182 					master = i;
2183 			}
2184 		}
2185 
2186 	}
2187 
2188 	kfree(hw_crtc_timing);
2189 	return master;
2190 }
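
/*
 * One reading of the DTO math above (a sketch, not an authoritative
 * statement of the hardware behavior): for a non-embedded pipe i,
 *
 *   phase[i]  = embedded_pix_clk * h_total[i] * v_total[i] / divider[i]
 *   modulo[i] = dp_ref_clk * embedded_h_total * embedded_v_total
 *
 * If the DP DTO outputs roughly ref_clk * phase / modulo, then
 * frame_rate[i] = pix_clk[i] * divider[i] / (h_total[i] * v_total[i])
 * works out to the embedded panel's frame rate, which is what lets the
 * vblanks be aligned afterwards.
 */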
2191 
dcn10_enable_vblanks_synchronization(struct dc * dc,int group_index,int group_size,struct pipe_ctx * grouped_pipes[])2192 void dcn10_enable_vblanks_synchronization(
2193 	struct dc *dc,
2194 	int group_index,
2195 	int group_size,
2196 	struct pipe_ctx *grouped_pipes[])
2197 {
2198 	struct dc_context *dc_ctx = dc->ctx;
2199 	struct output_pixel_processor *opp;
2200 	struct timing_generator *tg;
2201 	int i, width, height, master;
2202 
2203 	for (i = 1; i < group_size; i++) {
2204 		opp = grouped_pipes[i]->stream_res.opp;
2205 		tg = grouped_pipes[i]->stream_res.tg;
2206 		tg->funcs->get_otg_active_size(tg, &width, &height);
2207 
2208 		if (!tg->funcs->is_tg_enabled(tg)) {
2209 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2210 			return;
2211 		}
2212 
2213 		if (opp->funcs->opp_program_dpg_dimensions)
2214 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2215 	}
2216 
2217 	for (i = 0; i < group_size; i++) {
2218 		if (grouped_pipes[i]->stream == NULL)
2219 			continue;
2220 		grouped_pipes[i]->stream->vblank_synchronized = false;
2221 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2222 	}
2223 
2224 	DC_SYNC_INFO("Aligning DP DTOs\n");
2225 
2226 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2227 
2228 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2229 
2230 	if (master >= 0) {
2231 		for (i = 0; i < group_size; i++) {
2232 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2233 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2234 					grouped_pipes[master]->stream_res.tg,
2235 					grouped_pipes[i]->stream_res.tg,
2236 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2237 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2238 					get_clock_divider(grouped_pipes[master], false),
2239 					get_clock_divider(grouped_pipes[i], false));
2240 			grouped_pipes[i]->stream->vblank_synchronized = true;
2241 		}
2242 		grouped_pipes[master]->stream->vblank_synchronized = true;
2243 		DC_SYNC_INFO("Sync complete\n");
2244 	}
2245 
2246 	for (i = 1; i < group_size; i++) {
2247 		opp = grouped_pipes[i]->stream_res.opp;
2248 		tg = grouped_pipes[i]->stream_res.tg;
2249 		tg->funcs->get_otg_active_size(tg, &width, &height);
2250 		if (opp->funcs->opp_program_dpg_dimensions)
2251 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2252 	}
2253 }
2254 
dcn10_enable_timing_synchronization(struct dc * dc,int group_index,int group_size,struct pipe_ctx * grouped_pipes[])2255 void dcn10_enable_timing_synchronization(
2256 	struct dc *dc,
2257 	int group_index,
2258 	int group_size,
2259 	struct pipe_ctx *grouped_pipes[])
2260 {
2261 	struct dc_context *dc_ctx = dc->ctx;
2262 	struct output_pixel_processor *opp;
2263 	struct timing_generator *tg;
2264 	int i, width, height;
2265 
2266 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2267 
2268 	for (i = 1; i < group_size; i++) {
2269 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2270 			continue;
2271 
2272 		opp = grouped_pipes[i]->stream_res.opp;
2273 		tg = grouped_pipes[i]->stream_res.tg;
2274 		tg->funcs->get_otg_active_size(tg, &width, &height);
2275 
2276 		if (!tg->funcs->is_tg_enabled(tg)) {
2277 			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
2278 			return;
2279 		}
2280 
2281 		if (opp->funcs->opp_program_dpg_dimensions)
2282 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2283 	}
2284 
2285 	for (i = 0; i < group_size; i++) {
2286 		if (grouped_pipes[i]->stream == NULL)
2287 			continue;
2288 
2289 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2290 			continue;
2291 
2292 		grouped_pipes[i]->stream->vblank_synchronized = false;
2293 	}
2294 
2295 	for (i = 1; i < group_size; i++) {
2296 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2297 			continue;
2298 
2299 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2300 				grouped_pipes[i]->stream_res.tg,
2301 				grouped_pipes[0]->stream_res.tg->inst);
2302 	}
2303 
2304 	DC_SYNC_INFO("Waiting for trigger\n");
2305 
2306 	/* We only need to check one pipe for the reset, as all the others are
2307 	 * synchronized. Look at the last pipe programmed to reset.
2308 	 */
2309 
2310 	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2311 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2312 
2313 	for (i = 1; i < group_size; i++) {
2314 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2315 			continue;
2316 
2317 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2318 				grouped_pipes[i]->stream_res.tg);
2319 	}
2320 
2321 	for (i = 1; i < group_size; i++) {
2322 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2323 			continue;
2324 
2325 		opp = grouped_pipes[i]->stream_res.opp;
2326 		tg = grouped_pipes[i]->stream_res.tg;
2327 		tg->funcs->get_otg_active_size(tg, &width, &height);
2328 		if (opp->funcs->opp_program_dpg_dimensions)
2329 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2330 	}
2331 
2332 	DC_SYNC_INFO("Sync complete\n");
2333 }
2334 
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2335 void dcn10_enable_per_frame_crtc_position_reset(
2336 	struct dc *dc,
2337 	int group_size,
2338 	struct pipe_ctx *grouped_pipes[])
2339 {
2340 	struct dc_context *dc_ctx = dc->ctx;
2341 	int i;
2342 
2343 	DC_SYNC_INFO("Setting up\n");
2344 	for (i = 0; i < group_size; i++)
2345 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2346 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2347 					grouped_pipes[i]->stream_res.tg,
2348 					0,
2349 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2350 
2351 	DC_SYNC_INFO("Waiting for trigger\n");
2352 
2353 	for (i = 0; i < group_size; i++)
2354 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2355 
2356 	DC_SYNC_INFO("Multi-display sync is complete\n");
2357 }
2358 
mmhub_read_vm_system_aperture_settings(struct dcn10_hubp * hubp1,struct vm_system_aperture_param * apt,struct dce_hwseq * hws)2359 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2360 		struct vm_system_aperture_param *apt,
2361 		struct dce_hwseq *hws)
2362 {
2363 	PHYSICAL_ADDRESS_LOC physical_page_number;
2364 	uint32_t logical_addr_low;
2365 	uint32_t logical_addr_high;
2366 
2367 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2368 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2369 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2370 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2371 
2372 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2373 			LOGICAL_ADDR, &logical_addr_low);
2374 
2375 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2376 			LOGICAL_ADDR, &logical_addr_high);
2377 
2378 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2379 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2380 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2381 }
2382 
2383 /* Temporarily read the settings here; in the future the values will come from KMD directly. */
mmhub_read_vm_context0_settings(struct dcn10_hubp * hubp1,struct vm_context0_param * vm0,struct dce_hwseq * hws)2384 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2385 		struct vm_context0_param *vm0,
2386 		struct dce_hwseq *hws)
2387 {
2388 	PHYSICAL_ADDRESS_LOC fb_base;
2389 	PHYSICAL_ADDRESS_LOC fb_offset;
2390 	uint32_t fb_base_value;
2391 	uint32_t fb_offset_value;
2392 
2393 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2394 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2395 
2396 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2397 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2398 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2399 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2400 
2401 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2402 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2403 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2404 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2405 
2406 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2407 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2408 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2409 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2410 
2411 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2412 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2413 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2414 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2415 
2416 	/*
2417 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2418 	 * Therefore we need to do
2419 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2420 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2421 	 */
2422 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2423 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2424 	vm0->pte_base.quad_part += fb_base.quad_part;
2425 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2426 }
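
/*
 * A worked example of the UMA-to-physical adjustment above, using made-up
 * register values. FB_BASE and FB_OFFSET are in 16 MB units (hence << 24):
 *
 *   fb_base_value   = 0x0080  ->  fb_base   = 0x0000000080000000 (2 GB)
 *   fb_offset_value = 0x0000  ->  fb_offset = 0x0000000000000000
 *
 * A UMA-space page table base of 0x0000000100000000 then becomes
 * 0x0000000100000000 + 0x80000000 - 0 = 0x0000000180000000 in the address
 * space DCN expects.
 */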
2427 
2428 
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2429 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2430 {
2431 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2432 	struct vm_system_aperture_param apt = {0};
2433 	struct vm_context0_param vm0 = {0};
2434 
2435 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2436 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2437 
2438 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2439 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2440 }
2441 
dcn10_enable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)2442 static void dcn10_enable_plane(
2443 	struct dc *dc,
2444 	struct pipe_ctx *pipe_ctx,
2445 	struct dc_state *context)
2446 {
2447 	struct dce_hwseq *hws = dc->hwseq;
2448 
2449 	if (dc->debug.sanity_checks) {
2450 		hws->funcs.verify_allow_pstate_change_high(dc);
2451 	}
2452 
2453 	undo_DEGVIDCN10_253_wa(dc);
2454 
2455 	power_on_plane_resources(dc->hwseq,
2456 		pipe_ctx->plane_res.hubp->inst);
2457 
2458 	/* enable DCFCLK for the current DCHUB */
2459 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2460 
2461 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2462 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2463 			pipe_ctx->stream_res.opp,
2464 			true);
2465 
2466 	if (dc->config.gpu_vm_support)
2467 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2468 
2469 	if (dc->debug.sanity_checks) {
2470 		hws->funcs.verify_allow_pstate_change_high(dc);
2471 	}
2472 
2473 	if (!pipe_ctx->top_pipe
2474 		&& pipe_ctx->plane_state
2475 		&& pipe_ctx->plane_state->flip_int_enabled
2476 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2477 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2478 
2479 }
2480 
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2481 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2482 {
2483 	int i = 0;
2484 	struct dpp_grph_csc_adjustment adjust;
2485 	memset(&adjust, 0, sizeof(adjust));
2486 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2487 
2488 
2489 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2490 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2491 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2492 			adjust.temperature_matrix[i] =
2493 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2494 	} else if (pipe_ctx->plane_state &&
2495 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2496 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2497 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2498 			adjust.temperature_matrix[i] =
2499 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2500 	}
2501 
2502 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2503 }
2504 
2505 
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2506 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2507 {
2508 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2509 		if (pipe_ctx->top_pipe) {
2510 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2511 
2512 			while (top->top_pipe)
2513 				top = top->top_pipe; // Traverse to top pipe_ctx
2514 			if (top->plane_state && top->plane_state->layer_index == 0)
2515 				return true; // Front MPO plane not hidden
2516 		}
2517 	}
2518 	return false;
2519 }
2520 
dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx * pipe_ctx,uint16_t * matrix)2521 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2522 {
2523 	// Override rear plane RGB bias to fix MPO brightness
2524 	uint16_t rgb_bias = matrix[3];
2525 
2526 	matrix[3] = 0;
2527 	matrix[7] = 0;
2528 	matrix[11] = 0;
2529 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2530 	matrix[3] = rgb_bias;
2531 	matrix[7] = rgb_bias;
2532 	matrix[11] = rgb_bias;
2533 }
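
/*
 * This assumes the usual 3x4 output CSC layout, where each row holds three
 * coefficients followed by one offset term, so matrix[3], matrix[7] and
 * matrix[11] are the R/G/B offsets:
 *
 *   | c00 c01 c02 | off_r |   -> matrix[0..3]
 *   | c10 c11 c12 | off_g |   -> matrix[4..7]
 *   | c20 c21 c22 | off_b |   -> matrix[8..11]
 *
 * Only the offsets are zeroed for the rear plane; the coefficients are
 * programmed unchanged, and the saved offset is written back into the
 * caller's matrix afterwards.
 */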
2534 
dcn10_program_output_csc(struct dc * dc,struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace,uint16_t * matrix,int opp_id)2535 void dcn10_program_output_csc(struct dc *dc,
2536 		struct pipe_ctx *pipe_ctx,
2537 		enum dc_color_space colorspace,
2538 		uint16_t *matrix,
2539 		int opp_id)
2540 {
2541 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2542 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2543 
2544 			/* MPO is broken with RGB colorspaces when the OCSC matrix
2545 			 * brightness offset is >= 0 on DCN1, because OCSC sits before the MPC.
2546 			 * Blending then adds the offsets from front + rear to the rear plane.
2547 			 *
2548 			 * The fix is to set the RGB bias to 0 on the rear plane; the top plane's
2549 			 * black-value pixels then add the offset once instead of rear + front.
2550 			 */
2551 
2552 			int16_t rgb_bias = matrix[3];
2553 			// matrix[3/7/11] are all the same offset value
2554 
2555 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2556 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2557 			} else {
2558 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2559 			}
2560 		}
2561 	} else {
2562 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2563 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2564 	}
2565 }
2566 
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2567 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2568 {
2569 	struct dc_bias_and_scale bns_params = {0};
2570 
2571 	// program the input csc
2572 	dpp->funcs->dpp_setup(dpp,
2573 			plane_state->format,
2574 			EXPANSION_MODE_ZERO,
2575 			plane_state->input_csc_color_matrix,
2576 			plane_state->color_space,
2577 			NULL);
2578 
2579 	//set scale and bias registers
2580 	build_prescale_params(&bns_params, plane_state);
2581 	if (dpp->funcs->dpp_program_bias_and_scale)
2582 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2583 }
2584 
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2585 void dcn10_update_visual_confirm_color(struct dc *dc,
2586 		struct pipe_ctx *pipe_ctx,
2587 		int mpcc_id)
2588 {
2589 	struct mpc *mpc = dc->res_pool->mpc;
2590 
2591 	if (mpc->funcs->set_bg_color) {
2592 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2593 		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2594 	}
2595 }
2596 
dcn10_update_mpcc(struct dc * dc,struct pipe_ctx * pipe_ctx)2597 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2598 {
2599 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2600 	struct mpcc_blnd_cfg blnd_cfg = {0};
2601 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2602 	int mpcc_id;
2603 	struct mpcc *new_mpcc;
2604 	struct mpc *mpc = dc->res_pool->mpc;
2605 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2606 
2607 	blnd_cfg.overlap_only = false;
2608 	blnd_cfg.global_gain = 0xff;
2609 
2610 	if (per_pixel_alpha) {
2611 		/* DCN1.0 has output CM before MPC which seems to screw with
2612 		 * pre-multiplied alpha.
2613 		 */
2614 		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2615 				pipe_ctx->stream->output_color_space)
2616 						&& pipe_ctx->plane_state->pre_multiplied_alpha);
2617 		if (pipe_ctx->plane_state->global_alpha) {
2618 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2619 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2620 		} else {
2621 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2622 		}
2623 	} else {
2624 		blnd_cfg.pre_multiplied_alpha = false;
2625 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2626 	}
2627 
2628 	if (pipe_ctx->plane_state->global_alpha)
2629 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2630 	else
2631 		blnd_cfg.global_alpha = 0xff;
2632 
2633 	/*
2634 	 * TODO: remove hack
2635 	 * Note: currently there is a bug in init_hw such that
2636 	 * on resume from hibernate, BIOS sets up MPCC0, and
2637 	 * we do mpcc_remove but the mpcc cannot go to idle
2638 	 * after remove. This cause us to pick mpcc1 here,
2639 	 * after remove. This causes us to pick mpcc1 here,
2640 	 * which causes a pstate hang for a yet-unknown reason.
2641 	mpcc_id = hubp->inst;
2642 
2643 	/* If there is no full update, we don't need to touch the MPC tree. */
2644 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2645 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2646 		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2647 		return;
2648 	}
2649 
2650 	/* check if this MPCC is already being used */
2651 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2652 	/* remove MPCC if being used */
2653 	if (new_mpcc != NULL)
2654 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2655 	else
2656 		if (dc->debug.sanity_checks)
2657 			mpc->funcs->assert_mpcc_idle_before_connect(
2658 					dc->res_pool->mpc, mpcc_id);
2659 
2660 	/* Call MPC to insert new plane */
2661 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2662 			mpc_tree_params,
2663 			&blnd_cfg,
2664 			NULL,
2665 			NULL,
2666 			hubp->inst,
2667 			mpcc_id);
2668 	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
2669 
2670 	ASSERT(new_mpcc != NULL);
2671 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2672 	hubp->mpcc_id = mpcc_id;
2673 }
2674 
update_scaler(struct pipe_ctx * pipe_ctx)2675 static void update_scaler(struct pipe_ctx *pipe_ctx)
2676 {
2677 	bool per_pixel_alpha =
2678 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2679 
2680 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2681 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2682 	/* scaler configuration */
2683 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2684 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2685 }
2686 
dcn10_update_dchubp_dpp(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)2687 static void dcn10_update_dchubp_dpp(
2688 	struct dc *dc,
2689 	struct pipe_ctx *pipe_ctx,
2690 	struct dc_state *context)
2691 {
2692 	struct dce_hwseq *hws = dc->hwseq;
2693 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2694 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2695 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2696 	struct plane_size size = plane_state->plane_size;
2697 	unsigned int compat_level = 0;
2698 	bool should_divided_by_2 = false;
2699 
2700 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2701 	/* If the requested max DPP clock is lower than the current dispclk, there is
2702 	 * no need to divide by 2.
2703 	 */
2704 	if (plane_state->update_flags.bits.full_update) {
2705 
2706 		/* The newly calculated dispclk and dppclk are stored in
2707 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
2708 		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2709 		 * dcn10_validate_bandwidth computes the new dispclk and dppclk.
2710 		 * dispclk is put in use after optimize_bandwidth, when
2711 		 * ramp_up_dispclk_with_dpp is called.
2712 		 * There are two places where dppclk is put in use. One location
2713 		 * is the same as the location for dispclk. The other is within
2714 		 * update_dchubp_dpp, which happens between pre_bandwidth and
2715 		 * optimize_bandwidth.
2716 		 * A dppclk update within update_dchubp_dpp means the new
2717 		 * clock values of dispclk and dppclk are not in use at the same
2718 		 * time. When clocks are decreased, this may leave dppclk
2719 		 * lower than the previous configuration and get the pipe stuck.
2720 		 * For example, with eDP + external DP, change the DP resolution from
2721 		 * 1920x1080x144hz to 1280x960x60hz:
2722 		 * before the change: dispclk = 337889, dppclk = 337889
2723 		 * on the mode change, dcn10_validate_bandwidth calculates
2724 		 *                dispclk = 143122, dppclk = 143122
2725 		 * update_dchubp_dpp is executed before dispclk is updated, so
2726 		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2727 		 * 168944. This causes a pipe pstate warning.
2728 		 * Solution: between pre_bandwidth and optimize_bandwidth, while
2729 		 * dispclk is going to be decreased, keep dppclk = dispclk.
2730 		 **/
2731 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2732 				dc->clk_mgr->clks.dispclk_khz)
2733 			should_divided_by_2 = false;
2734 		else
2735 			should_divided_by_2 =
2736 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2737 					dc->clk_mgr->clks.dispclk_khz / 2;
2738 
2739 		dpp->funcs->dpp_dppclk_control(
2740 				dpp,
2741 				should_divided_by_2,
2742 				true);
2743 
2744 		if (dc->res_pool->dccg)
2745 			dc->res_pool->dccg->funcs->update_dpp_dto(
2746 					dc->res_pool->dccg,
2747 					dpp->inst,
2748 					pipe_ctx->plane_res.bw.dppclk_khz);
2749 		else
2750 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2751 						dc->clk_mgr->clks.dispclk_khz / 2 :
2752 							dc->clk_mgr->clks.dispclk_khz;
2753 	}
2754 
2755 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe ties to.
2756 	 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
2757 	 * VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG to use.
2758 	 */
2759 	if (plane_state->update_flags.bits.full_update) {
2760 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2761 
2762 		hubp->funcs->hubp_setup(
2763 			hubp,
2764 			&pipe_ctx->dlg_regs,
2765 			&pipe_ctx->ttu_regs,
2766 			&pipe_ctx->rq_regs,
2767 			&pipe_ctx->pipe_dlg_param);
2768 		hubp->funcs->hubp_setup_interdependent(
2769 			hubp,
2770 			&pipe_ctx->dlg_regs,
2771 			&pipe_ctx->ttu_regs);
2772 	}
2773 
2774 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2775 
2776 	if (plane_state->update_flags.bits.full_update ||
2777 		plane_state->update_flags.bits.bpp_change)
2778 		dcn10_update_dpp(dpp, plane_state);
2779 
2780 	if (plane_state->update_flags.bits.full_update ||
2781 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2782 		plane_state->update_flags.bits.global_alpha_change)
2783 		hws->funcs.update_mpcc(dc, pipe_ctx);
2784 
2785 	if (plane_state->update_flags.bits.full_update ||
2786 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2787 		plane_state->update_flags.bits.global_alpha_change ||
2788 		plane_state->update_flags.bits.scaling_change ||
2789 		plane_state->update_flags.bits.position_change) {
2790 		update_scaler(pipe_ctx);
2791 	}
2792 
2793 	if (plane_state->update_flags.bits.full_update ||
2794 		plane_state->update_flags.bits.scaling_change ||
2795 		plane_state->update_flags.bits.position_change) {
2796 		hubp->funcs->mem_program_viewport(
2797 			hubp,
2798 			&pipe_ctx->plane_res.scl_data.viewport,
2799 			&pipe_ctx->plane_res.scl_data.viewport_c);
2800 	}
2801 
2802 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2803 		dc->hwss.set_cursor_position(pipe_ctx);
2804 		dc->hwss.set_cursor_attribute(pipe_ctx);
2805 
2806 		if (dc->hwss.set_cursor_sdr_white_level)
2807 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2808 	}
2809 
2810 	if (plane_state->update_flags.bits.full_update) {
2811 		/*gamut remap*/
2812 		dc->hwss.program_gamut_remap(pipe_ctx);
2813 
2814 		dc->hwss.program_output_csc(dc,
2815 				pipe_ctx,
2816 				pipe_ctx->stream->output_color_space,
2817 				pipe_ctx->stream->csc_color_matrix.matrix,
2818 				pipe_ctx->stream_res.opp->inst);
2819 	}
2820 
2821 	if (plane_state->update_flags.bits.full_update ||
2822 		plane_state->update_flags.bits.pixel_format_change ||
2823 		plane_state->update_flags.bits.horizontal_mirror_change ||
2824 		plane_state->update_flags.bits.rotation_change ||
2825 		plane_state->update_flags.bits.swizzle_change ||
2826 		plane_state->update_flags.bits.dcc_change ||
2827 		plane_state->update_flags.bits.bpp_change ||
2828 		plane_state->update_flags.bits.scaling_change ||
2829 		plane_state->update_flags.bits.plane_size_change) {
2830 		hubp->funcs->hubp_program_surface_config(
2831 			hubp,
2832 			plane_state->format,
2833 			&plane_state->tiling_info,
2834 			&size,
2835 			plane_state->rotation,
2836 			&plane_state->dcc,
2837 			plane_state->horizontal_mirror,
2838 			compat_level);
2839 	}
2840 
2841 	hubp->power_gated = false;
2842 
2843 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2844 
2845 	if (is_pipe_tree_visible(pipe_ctx))
2846 		hubp->funcs->set_blank(hubp, false);
2847 }
2848 
dcn10_blank_pixel_data(struct dc * dc,struct pipe_ctx * pipe_ctx,bool blank)2849 void dcn10_blank_pixel_data(
2850 		struct dc *dc,
2851 		struct pipe_ctx *pipe_ctx,
2852 		bool blank)
2853 {
2854 	enum dc_color_space color_space;
2855 	struct tg_color black_color = {0};
2856 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2857 	struct dc_stream_state *stream = pipe_ctx->stream;
2858 
2859 	/* program otg blank color */
2860 	color_space = stream->output_color_space;
2861 	color_space_to_black_color(dc, color_space, &black_color);
2862 
2863 	/*
2864 	 * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
2865 	 * alternates between Cb and Cr, so both channels need the pixel
2866 	 * value for Y.
2867 	 */
2868 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2869 		black_color.color_r_cr = black_color.color_g_y;
2870 
2871 
2872 	if (stream_res->tg->funcs->set_blank_color)
2873 		stream_res->tg->funcs->set_blank_color(
2874 				stream_res->tg,
2875 				&black_color);
2876 
2877 	if (!blank) {
2878 		if (stream_res->tg->funcs->set_blank)
2879 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2880 		if (stream_res->abm) {
2881 			dc->hwss.set_pipe(pipe_ctx);
2882 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2883 		}
2884 	} else {
2885 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2886 		if (stream_res->tg->funcs->set_blank) {
2887 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2888 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2889 		}
2890 	}
2891 }
2892 
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2893 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2894 {
2895 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2896 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2897 	struct custom_float_format fmt;
2898 
2899 	fmt.exponenta_bits = 6;
2900 	fmt.mantissa_bits = 12;
2901 	fmt.sign = true;
2902 
2903 
2904 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2905 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2906 
2907 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2908 			pipe_ctx->plane_res.dpp, hw_mult);
2909 }
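
/*
 * With the format above (1 sign bit, 6 exponent bits, 12 mantissa bits) and
 * assuming the usual bias of 2^(6-1) - 1 = 31, the default 0x1f000 decodes
 * as exponent field 0x1f (31) and mantissa 0, i.e. 1.0 * 2^(31 - 31) = 1.0,
 * matching the "1.0 default multiplier" comment on hw_mult.
 */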
2910 
dcn10_program_pipe(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context)2911 void dcn10_program_pipe(
2912 		struct dc *dc,
2913 		struct pipe_ctx *pipe_ctx,
2914 		struct dc_state *context)
2915 {
2916 	struct dce_hwseq *hws = dc->hwseq;
2917 
2918 	if (pipe_ctx->top_pipe == NULL) {
2919 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2920 
2921 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2922 				pipe_ctx->stream_res.tg,
2923 				calculate_vready_offset_for_group(pipe_ctx),
2924 				pipe_ctx->pipe_dlg_param.vstartup_start,
2925 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2926 				pipe_ctx->pipe_dlg_param.vupdate_width);
2927 
2928 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2929 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2930 
2931 		if (hws->funcs.setup_vupdate_interrupt)
2932 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2933 
2934 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2935 	}
2936 
2937 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2938 		dcn10_enable_plane(dc, pipe_ctx, context);
2939 
2940 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2941 
2942 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2943 
2944 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2945 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2946 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2947 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2948 
2949 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2950 	 * so only do gamma programming for a full update.
2951 	 * TODO: This can be further optimized/cleaned up.
2952 	 * Always call this for now, since it does a memcmp inside before
2953 	 * doing the heavy calculation and programming.
2954 	 */
2955 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2956 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2957 }
2958 
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)2959 void dcn10_wait_for_pending_cleared(struct dc *dc,
2960 		struct dc_state *context)
2961 {
2962 		struct pipe_ctx *pipe_ctx;
2963 		struct timing_generator *tg;
2964 		int i;
2965 
2966 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2967 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2968 			tg = pipe_ctx->stream_res.tg;
2969 
2970 			/*
2971 			 * Only wait for the top pipe's TG pending bit.
2972 			 * Also skip if pipe is disabled.
2973 			 */
2974 			if (pipe_ctx->top_pipe ||
2975 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2976 			    !tg->funcs->is_tg_enabled(tg))
2977 				continue;
2978 
2979 			/*
2980 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2981 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2982 			 * seems to not trigger the update right away, and if we
2983 			 * lock again before VUPDATE then we don't get a separated
2984 			 * lock again before VUPDATE then we don't get a separate
2985 			 */
2986 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2987 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2988 		}
2989 }
2990 
dcn10_post_unlock_program_front_end(struct dc * dc,struct dc_state * context)2991 void dcn10_post_unlock_program_front_end(
2992 		struct dc *dc,
2993 		struct dc_state *context)
2994 {
2995 	int i;
2996 
2997 	DC_LOGGER_INIT(dc->ctx->logger);
2998 
2999 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3000 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3001 
3002 		if (!pipe_ctx->top_pipe &&
3003 			!pipe_ctx->prev_odm_pipe &&
3004 			pipe_ctx->stream) {
3005 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3006 
3007 			if (context->stream_status[i].plane_count == 0)
3008 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3009 		}
3010 	}
3011 
3012 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3013 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3014 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3015 
3016 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3017 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3018 			dc->hwss.optimize_bandwidth(dc, context);
3019 			break;
3020 		}
3021 
3022 	if (dc->hwseq->wa.DEGVIDCN10_254)
3023 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3024 }
3025 
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3026 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3027 {
3028 	uint8_t i;
3029 
3030 	for (i = 0; i < context->stream_count; i++) {
3031 		if (context->streams[i]->timing.timing_3d_format
3032 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3033 			/*
3034 			 * Disable stutter
3035 			 */
3036 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3037 			break;
3038 		}
3039 	}
3040 }
3041 
dcn10_prepare_bandwidth(struct dc * dc,struct dc_state * context)3042 void dcn10_prepare_bandwidth(
3043 		struct dc *dc,
3044 		struct dc_state *context)
3045 {
3046 	struct dce_hwseq *hws = dc->hwseq;
3047 	struct hubbub *hubbub = dc->res_pool->hubbub;
3048 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3049 
3050 	if (dc->debug.sanity_checks)
3051 		hws->funcs.verify_allow_pstate_change_high(dc);
3052 
3053 	if (context->stream_count == 0)
3054 		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3055 
3056 	dc->clk_mgr->funcs->update_clocks(
3057 			dc->clk_mgr,
3058 			context,
3059 			false);
3060 
3061 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3062 			&context->bw_ctx.bw.dcn.watermarks,
3063 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3064 			true);
3065 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3066 
3067 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3068 		DC_FP_START();
3069 		dcn_get_soc_clks(
3070 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3071 		DC_FP_END();
3072 		dcn_bw_notify_pplib_of_wm_ranges(
3073 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3074 	}
3075 
3076 	if (dc->debug.sanity_checks)
3077 		hws->funcs.verify_allow_pstate_change_high(dc);
3078 }
3079 
dcn10_optimize_bandwidth(struct dc * dc,struct dc_state * context)3080 void dcn10_optimize_bandwidth(
3081 		struct dc *dc,
3082 		struct dc_state *context)
3083 {
3084 	struct dce_hwseq *hws = dc->hwseq;
3085 	struct hubbub *hubbub = dc->res_pool->hubbub;
3086 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3087 
3088 	if (dc->debug.sanity_checks)
3089 		hws->funcs.verify_allow_pstate_change_high(dc);
3090 
3091 	if (context->stream_count == 0)
3092 		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3093 
3094 	dc->clk_mgr->funcs->update_clocks(
3095 			dc->clk_mgr,
3096 			context,
3097 			true);
3098 
3099 	hubbub->funcs->program_watermarks(hubbub,
3100 			&context->bw_ctx.bw.dcn.watermarks,
3101 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3102 			true);
3103 
3104 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3105 
3106 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3107 		DC_FP_START();
3108 		dcn_get_soc_clks(
3109 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3110 		DC_FP_END();
3111 		dcn_bw_notify_pplib_of_wm_ranges(
3112 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3113 	}
3114 
3115 	if (dc->debug.sanity_checks)
3116 		hws->funcs.verify_allow_pstate_change_high(dc);
3117 }
3118 
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3119 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3120 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3121 {
3122 	int i = 0;
3123 	struct drr_params params = {0};
3124 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3125 	unsigned int event_triggers = 0x800;
3126 	// Note: DRR trigger events are generated regardless of whether the num_frames count is met.
3127 	unsigned int num_frames = 2;
3128 
3129 	params.vertical_total_max = adjust.v_total_max;
3130 	params.vertical_total_min = adjust.v_total_min;
3131 	params.vertical_total_mid = adjust.v_total_mid;
3132 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3133 	/* TODO: If multiple pipes are to be supported, you need
3134 	 * some GSL stuff. Static screen triggers may be programmed differently
3135 	 * as well.
3136 	 */
3137 	for (i = 0; i < num_pipes; i++) {
3138 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3139 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3140 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3141 					pipe_ctx[i]->stream_res.tg, &params);
3142 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3143 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3144 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3145 						pipe_ctx[i]->stream_res.tg,
3146 						event_triggers, num_frames);
3147 		}
3148 	}
3149 }
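
/*
 * Illustrative DRR adjust (hypothetical timing, not taken from this driver):
 * for a 2200 x 1125 @ 60 Hz mode with a 148.5 MHz pixel clock, keeping
 * v_total_min = 1125 preserves the 60 Hz ceiling, while v_total_max = 1406
 * (148500000 / (2200 * 48), rounded down) lets the frame stretch to roughly
 * 48 Hz; v_total_mid and v_total_mid_frame_num stay 0 when no mid point is
 * used.
 */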
3150 
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3151 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3152 		int num_pipes,
3153 		struct crtc_position *position)
3154 {
3155 	int i = 0;
3156 
3157 	/* TODO: handle pipes > 1
3158 	 */
3159 	for (i = 0; i < num_pipes; i++)
3160 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3161 }
3162 
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3163 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3164 		int num_pipes, const struct dc_static_screen_params *params)
3165 {
3166 	unsigned int i;
3167 	unsigned int triggers = 0;
3168 
3169 	if (params->triggers.surface_update)
3170 		triggers |= 0x80;
3171 	if (params->triggers.cursor_update)
3172 		triggers |= 0x2;
3173 	if (params->triggers.force_trigger)
3174 		triggers |= 0x1;
3175 
3176 	for (i = 0; i < num_pipes; i++)
3177 		pipe_ctx[i]->stream_res.tg->funcs->
3178 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3179 					triggers, params->num_frames);
3180 }
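
/*
 * Example of the resulting mask (illustrative): a params->triggers with
 * surface_update = 1 and force_trigger = 1 yields triggers = 0x80 | 0x1 =
 * 0x81, which is then programmed into every timing generator in pipe_ctx[].
 */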
3181 
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3182 static void dcn10_config_stereo_parameters(
3183 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3184 {
3185 	enum view_3d_format view_format = stream->view_format;
3186 	enum dc_timing_3d_format timing_3d_format =\
3187 			stream->timing.timing_3d_format;
3188 	bool non_stereo_timing = false;
3189 
3190 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3191 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3192 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3193 		non_stereo_timing = true;
3194 
3195 	if (non_stereo_timing == false &&
3196 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3197 
3198 		flags->PROGRAM_STEREO         = 1;
3199 		flags->PROGRAM_POLARITY       = 1;
3200 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3201 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3202 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3203 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3204 
3205 			if (stream->link && stream->link->ddc) {
3206 				enum display_dongle_type dongle = \
3207 						stream->link->ddc->dongle_type;
3208 
3209 				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3210 					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3211 					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3212 					flags->DISABLE_STEREO_DP_SYNC = 1;
3213 			}
3214 		}
3215 		flags->RIGHT_EYE_POLARITY =\
3216 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3217 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3218 			flags->FRAME_PACKED = 1;
3219 	}
3220 
3221 	return;
3222 }
3223 
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3224 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3225 {
3226 	struct crtc_stereo_flags flags = { 0 };
3227 	struct dc_stream_state *stream = pipe_ctx->stream;
3228 
3229 	dcn10_config_stereo_parameters(stream, &flags);
3230 
3231 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3232 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3233 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3234 	} else {
3235 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3236 	}
3237 
3238 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3239 		pipe_ctx->stream_res.opp,
3240 		flags.PROGRAM_STEREO == 1,
3241 		&stream->timing);
3242 
3243 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3244 		pipe_ctx->stream_res.tg,
3245 		&stream->timing,
3246 		&flags);
3247 
3248 	return;
3249 }
3250 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3251 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3252 {
3253 	int i;
3254 
3255 	for (i = 0; i < res_pool->pipe_count; i++) {
3256 		if (res_pool->hubps[i]->inst == mpcc_inst)
3257 			return res_pool->hubps[i];
3258 	}
3259 	ASSERT(false);
3260 	return NULL;
3261 }
3262 
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3263 void dcn10_wait_for_mpcc_disconnect(
3264 		struct dc *dc,
3265 		struct resource_pool *res_pool,
3266 		struct pipe_ctx *pipe_ctx)
3267 {
3268 	struct dce_hwseq *hws = dc->hwseq;
3269 	int mpcc_inst;
3270 
3271 	if (dc->debug.sanity_checks) {
3272 		hws->funcs.verify_allow_pstate_change_high(dc);
3273 	}
3274 
3275 	if (!pipe_ctx->stream_res.opp)
3276 		return;
3277 
3278 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3279 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3280 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3281 
3282 			if (pipe_ctx->stream_res.tg &&
3283 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3284 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3285 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3286 			hubp->funcs->set_blank(hubp, true);
3287 		}
3288 	}
3289 
3290 	if (dc->debug.sanity_checks) {
3291 		hws->funcs.verify_allow_pstate_change_high(dc);
3292 	}
3293 
3294 }
3295 
dcn10_dummy_display_power_gating(struct dc * dc,uint8_t controller_id,struct dc_bios * dcb,enum pipe_gating_control power_gating)3296 bool dcn10_dummy_display_power_gating(
3297 	struct dc *dc,
3298 	uint8_t controller_id,
3299 	struct dc_bios *dcb,
3300 	enum pipe_gating_control power_gating)
3301 {
3302 	return true;
3303 }
3304 
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3305 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3306 {
3307 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3308 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3309 	bool flip_pending;
3310 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3311 
3312 	if (plane_state == NULL)
3313 		return;
3314 
3315 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3316 					pipe_ctx->plane_res.hubp);
3317 
3318 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3319 
3320 	if (!flip_pending)
3321 		plane_state->status.current_address = plane_state->status.requested_address;
3322 
3323 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3324 			tg->funcs->is_stereo_left_eye) {
3325 		plane_state->status.is_right_eye =
3326 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3327 	}
3328 
3329 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3330 		struct dce_hwseq *hwseq = dc->hwseq;
3331 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3332 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3333 
3334 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3335 			struct hubbub *hubbub = dc->res_pool->hubbub;
3336 
3337 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3338 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3339 		}
3340 	}
3341 }
3342 
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3343 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3344 {
3345 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3346 
3347 	/* In DCN, this programming sequence is owned by the hubbub */
3348 	hubbub->funcs->update_dchub(hubbub, dh_data);
3349 }
3350 
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3351 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3352 {
3353 	struct pipe_ctx *test_pipe, *split_pipe;
3354 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3355 	struct rect r1 = scl_data->recout, r2, r2_half;
3356 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3357 	int cur_layer = pipe_ctx->plane_state->layer_index;
3358 
3359 	/**
3360 	 * Disable the cursor if there's another pipe above this with a
3361 	 * plane that contains this pipe's viewport to prevent double cursor
3362 	 * and incorrect scaling artifacts.
3363 	 */
3364 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3365 	     test_pipe = test_pipe->top_pipe) {
3366 		// Skip invisible layer and pipe-split plane on same layer
3367 		if (!test_pipe->plane_state ||
3368 		    !test_pipe->plane_state->visible ||
3369 		    test_pipe->plane_state->layer_index == cur_layer)
3370 			continue;
3371 
3372 		r2 = test_pipe->plane_res.scl_data.recout;
3373 		r2_r = r2.x + r2.width;
3374 		r2_b = r2.y + r2.height;
3375 		split_pipe = test_pipe;
3376 
3377 		/**
3378 		 * There may be another half plane on the same layer because of
3379 		 * pipe-split; if so, merge the two recouts (they share a height).
3380 		 */
3381 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3382 		     split_pipe = split_pipe->top_pipe)
3383 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3384 				r2_half = split_pipe->plane_res.scl_data.recout;
3385 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3386 				r2.width = r2.width + r2_half.width;
3387 				r2_r = r2.x + r2.width;
3388 				break;
3389 			}
3390 
3391 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3392 			return true;
3393 	}
3394 
3395 	return false;
3396 }
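
/*
 * Containment example (hypothetical rects): with r1 = {0, 0, 960, 1080} on
 * the lower pipe and a merged r2 = {0, 0, 1920, 1080} on a visible pipe
 * above it, r1 lies fully inside r2 (0 >= 0, 0 >= 0, 960 <= 1920,
 * 1080 <= 1080), so the function returns true and the caller disables the
 * cursor on the lower pipe.
 */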
3397 
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3398 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3399 {
3400 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3401 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3402 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3403 	struct dc_cursor_mi_param param = {
3404 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3405 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3406 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3407 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3408 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3409 		.rotation = pipe_ctx->plane_state->rotation,
3410 		.mirror = pipe_ctx->plane_state->horizontal_mirror,
3411 		.stream = pipe_ctx->stream,
3412 	};
3413 	bool pipe_split_on = false;
3414 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3415 		(pipe_ctx->prev_odm_pipe != NULL);
3416 
3417 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3418 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3419 	int x_pos = pos_cpy.x;
3420 	int y_pos = pos_cpy.y;
3421 
3422 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3423 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3424 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3425 			pipe_split_on = true;
3426 		}
3427 	}
3428 
3429 	/**
3430 	 * DC cursor is stream space, HW cursor is plane space and drawn
3431 	 * as part of the framebuffer.
3432 	 *
3433 	 * Cursor position can't be negative, but hotspot can be used to
3434 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3435 	 * than the cursor size.
3436 	 */
3437 
3438 	/**
3439 	 * Translate cursor from stream space to plane space.
3440 	 *
3441 	 * If the cursor is scaled then we need to scale the position
3442 	 * to be in the approximately correct place. We can't do anything
3443 	 * about the actual size being incorrect, that's a limitation of
3444 	 * the hardware.
3445 	 */
3446 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3447 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3448 				pipe_ctx->plane_state->dst_rect.width;
3449 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3450 				pipe_ctx->plane_state->dst_rect.height;
3451 	} else {
3452 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3453 				pipe_ctx->plane_state->dst_rect.width;
3454 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3455 				pipe_ctx->plane_state->dst_rect.height;
3456 	}
3457 
3458 	/**
3459 	 * If the cursor's source viewport is clipped then we need to
3460 	 * translate the cursor to appear in the correct position on
3461 	 * the screen.
3462 	 *
3463 	 * This translation isn't affected by scaling so it needs to be
3464 	 * done *after* we adjust the position for the scale factor.
3465 	 *
3466 		 * This is only done on an opt-in basis for now since there are still
3467 	 * some usecases like tiled display that might enable the
3468 	 * cursor on both streams while expecting dc to clip it.
3469 	 */
3470 	if (pos_cpy.translate_by_source) {
3471 		x_pos += pipe_ctx->plane_state->src_rect.x;
3472 		y_pos += pipe_ctx->plane_state->src_rect.y;
3473 	}
3474 
3475 	/**
3476 	 * If the position is negative then we need to add to the hotspot
3477 	 * to shift the cursor outside the plane.
3478 	 */
3479 
3480 	if (x_pos < 0) {
3481 		pos_cpy.x_hotspot -= x_pos;
3482 		x_pos = 0;
3483 	}
3484 
3485 	if (y_pos < 0) {
3486 		pos_cpy.y_hotspot -= y_pos;
3487 		y_pos = 0;
3488 	}
3489 
3490 	pos_cpy.x = (uint32_t)x_pos;
3491 	pos_cpy.y = (uint32_t)y_pos;
3492 
3493 	if (pipe_ctx->plane_state->address.type
3494 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3495 		pos_cpy.enable = false;
3496 
3497 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3498 		pos_cpy.enable = false;
3499 
3500 
3501 	if (param.rotation == ROTATION_ANGLE_0) {
3502 		int viewport_width =
3503 			pipe_ctx->plane_res.scl_data.viewport.width;
3504 		int viewport_x =
3505 			pipe_ctx->plane_res.scl_data.viewport.x;
3506 
3507 		if (param.mirror) {
3508 			if (pipe_split_on || odm_combine_on) {
3509 				if (pos_cpy.x >= viewport_width + viewport_x) {
3510 					pos_cpy.x = 2 * viewport_width
3511 							- pos_cpy.x + 2 * viewport_x;
3512 				} else {
3513 					uint32_t temp_x = pos_cpy.x;
3514 
3515 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3516 					if (temp_x >= viewport_x +
3517 						(int)hubp->curs_attr.width || pos_cpy.x
3518 						<= (int)hubp->curs_attr.width +
3519 						pipe_ctx->plane_state->src_rect.x) {
3520 						pos_cpy.x = 2 * viewport_width - temp_x;
3521 					}
3522 				}
3523 			} else {
3524 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3525 			}
3526 		}
3527 	}
3528 	// Swap axis and mirror horizontally
3529 	else if (param.rotation == ROTATION_ANGLE_90) {
3530 		uint32_t temp_x = pos_cpy.x;
3531 
3532 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3533 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3534 		pos_cpy.y = temp_x;
3535 	}
3536 	// Swap axis and mirror vertically
3537 	else if (param.rotation == ROTATION_ANGLE_270) {
3538 		uint32_t temp_y = pos_cpy.y;
3539 		int viewport_height =
3540 			pipe_ctx->plane_res.scl_data.viewport.height;
3541 		int viewport_y =
3542 			pipe_ctx->plane_res.scl_data.viewport.y;
3543 
3544 		/**
3545 		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height
3546 		 * For pipe split cases:
3547 		 * - apply offset of viewport.y to normalize pos_cpy.x
3548 		 * - calculate the pos_cpy.y as before
3549 		 * - shift pos_cpy.y back by same offset to get final value
3550 		 * - since we iterate through both pipes, use the lower
3551 		 *   viewport.y for offset
3552 		 * For non pipe split cases, use the same calculation for
3553 		 *  pos_cpy.y as the 180 degree rotation case below,
3554 		 *  but use pos_cpy.x as our input because we are rotating
3555 		 *  270 degrees
3556 		 */
3557 		if (pipe_split_on || odm_combine_on) {
3558 			int pos_cpy_x_offset;
3559 			int other_pipe_viewport_y;
3560 
3561 			if (pipe_split_on) {
3562 				if (pipe_ctx->bottom_pipe) {
3563 					other_pipe_viewport_y =
3564 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3565 				} else {
3566 					other_pipe_viewport_y =
3567 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3568 				}
3569 			} else {
3570 				if (pipe_ctx->next_odm_pipe) {
3571 					other_pipe_viewport_y =
3572 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3573 				} else {
3574 					other_pipe_viewport_y =
3575 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3576 				}
3577 			}
3578 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3579 				other_pipe_viewport_y : viewport_y;
3580 			pos_cpy.x -= pos_cpy_x_offset;
3581 			if (pos_cpy.x > viewport_height) {
3582 				pos_cpy.x = pos_cpy.x - viewport_height;
3583 				pos_cpy.y = viewport_height - pos_cpy.x;
3584 			} else {
3585 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3586 			}
3587 			pos_cpy.y += pos_cpy_x_offset;
3588 		} else {
3589 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3590 		}
3591 		pos_cpy.x = temp_y;
3592 	}
3593 	// Mirror horizontally and vertically
3594 	else if (param.rotation == ROTATION_ANGLE_180) {
3595 		int viewport_width =
3596 			pipe_ctx->plane_res.scl_data.viewport.width;
3597 		int viewport_x =
3598 			pipe_ctx->plane_res.scl_data.viewport.x;
3599 
3600 		if (!param.mirror) {
3601 			if (pipe_split_on || odm_combine_on) {
3602 				if (pos_cpy.x >= viewport_width + viewport_x) {
3603 					pos_cpy.x = 2 * viewport_width
3604 							- pos_cpy.x + 2 * viewport_x;
3605 				} else {
3606 					uint32_t temp_x = pos_cpy.x;
3607 
3608 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3609 					if (temp_x >= viewport_x +
3610 						(int)hubp->curs_attr.width || pos_cpy.x
3611 						<= (int)hubp->curs_attr.width +
3612 						pipe_ctx->plane_state->src_rect.x) {
3613 						pos_cpy.x = temp_x + viewport_width;
3614 					}
3615 				}
3616 			} else {
3617 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3618 			}
3619 		}
3620 
3621 		/**
3622 		 * Display groups that are 1xnY have pos_cpy.y > viewport.height
3623 		 * Calculation:
3624 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3625 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3626 		 * Simplify it as:
3627 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3628 		 */
3629 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3630 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3631 	}
3632 
3633 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3634 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3635 }
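
/*
 * Worked example for the 180 degree case above (hypothetical values): with
 * viewport.y = 0, viewport.height = 1080 and an incoming pos_cpy.y of 100,
 * the mirrored position becomes 2 * 0 + 1080 - 100 = 980.
 */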
3636 
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3637 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3638 {
3639 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3640 
3641 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3642 			pipe_ctx->plane_res.hubp, attributes);
3643 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3644 		pipe_ctx->plane_res.dpp, attributes);
3645 }
3646 
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3647 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3648 {
3649 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3650 	struct fixed31_32 multiplier;
3651 	struct dpp_cursor_attributes opt_attr = { 0 };
3652 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3653 	struct custom_float_format fmt;
3654 
3655 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3656 		return;
3657 
3658 	fmt.exponenta_bits = 5;
3659 	fmt.mantissa_bits = 10;
3660 	fmt.sign = true;
3661 
3662 	if (sdr_white_level > 80) {
3663 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3664 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3665 	}
3666 
3667 	opt_attr.scale = hw_scale;
3668 	opt_attr.bias = 0;
3669 
3670 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3671 			pipe_ctx->plane_res.dpp, &opt_attr);
3672 }
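
/*
 * Worked example (illustrative): the 5-bit-exponent / 10-bit-mantissa format
 * used here matches half precision, where 1.0 encodes as the 0x3c00 default
 * above. An sdr_white_level of 160 nits gives a multiplier of 160 / 80 = 2.0,
 * which converts to hw_scale = 0x4000.
 */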
3673 
3674 /*
3675  * apply_front_porch_workaround  TODO FPGA still need?
3676  *
3677  * This is a workaround for a bug that has existed since R5xx and has not been
3678  * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
3679  */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3680 static void apply_front_porch_workaround(
3681 	struct dc_crtc_timing *timing)
3682 {
3683 	if (timing->flags.INTERLACE == 1) {
3684 		if (timing->v_front_porch < 2)
3685 			timing->v_front_porch = 2;
3686 	} else {
3687 		if (timing->v_front_porch < 1)
3688 			timing->v_front_porch = 1;
3689 	}
3690 }
3691 
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3692 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3693 {
3694 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3695 	struct dc_crtc_timing patched_crtc_timing;
3696 	int vesa_sync_start;
3697 	int asic_blank_end;
3698 	int interlace_factor;
3699 
3700 	patched_crtc_timing = *dc_crtc_timing;
3701 	apply_front_porch_workaround(&patched_crtc_timing);
3702 
3703 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3704 
3705 	vesa_sync_start = patched_crtc_timing.v_addressable +
3706 			patched_crtc_timing.v_border_bottom +
3707 			patched_crtc_timing.v_front_porch;
3708 
3709 	asic_blank_end = (patched_crtc_timing.v_total -
3710 			vesa_sync_start -
3711 			patched_crtc_timing.v_border_top)
3712 			* interlace_factor;
3713 
3714 	return asic_blank_end -
3715 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3716 }
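
/*
 * Worked example (hypothetical numbers): for a progressive 1080p timing with
 * v_total = 1125, v_addressable = 1080, v_front_porch = 4 and no borders,
 * vesa_sync_start = 1084 and asic_blank_end = (1125 - 1084) * 1 = 41; with a
 * vstartup_start of 36 the offset returned is 41 - 36 + 1 = 6 lines.
 */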
3717 
dcn10_calc_vupdate_position(struct dc * dc,struct pipe_ctx * pipe_ctx,uint32_t * start_line,uint32_t * end_line)3718 void dcn10_calc_vupdate_position(
3719 		struct dc *dc,
3720 		struct pipe_ctx *pipe_ctx,
3721 		uint32_t *start_line,
3722 		uint32_t *end_line)
3723 {
3724 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3725 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3726 
3727 	if (vupdate_pos >= 0)
3728 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3729 	else
3730 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3731 	*end_line = (*start_line + 2) % timing->v_total;
3732 }
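
/*
 * Example of the wrap handling above (hypothetical values, v_total = 1125):
 * a vupdate_pos of 6 gives start_line = 6 and end_line = 8, while a
 * vupdate_pos of -3 gives start_line = -3 + (0 + 1) * 1125 - 1 = 1121 and
 * end_line = 1123.
 */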
3733 
dcn10_cal_vline_position(struct dc * dc,struct pipe_ctx * pipe_ctx,uint32_t * start_line,uint32_t * end_line)3734 static void dcn10_cal_vline_position(
3735 		struct dc *dc,
3736 		struct pipe_ctx *pipe_ctx,
3737 		uint32_t *start_line,
3738 		uint32_t *end_line)
3739 {
3740 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3741 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3742 
3743 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3744 		if (vline_pos > 0)
3745 			vline_pos--;
3746 		else if (vline_pos < 0)
3747 			vline_pos++;
3748 
3749 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3750 		if (vline_pos >= 0)
3751 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3752 		else
3753 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3754 		*end_line = (*start_line + 2) % timing->v_total;
3755 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3756 		// vsync is line 0 so start_line is just the requested line offset
3757 		*start_line = vline_pos;
3758 		*end_line = (*start_line + 2) % timing->v_total;
3759 	} else
3760 		ASSERT(0);
3761 }
3762 
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3763 void dcn10_setup_periodic_interrupt(
3764 		struct dc *dc,
3765 		struct pipe_ctx *pipe_ctx)
3766 {
3767 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3768 	uint32_t start_line = 0;
3769 	uint32_t end_line = 0;
3770 
3771 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3772 
3773 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3774 }
3775 
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3776 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3777 {
3778 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3779 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3780 
3781 	if (start_line < 0) {
3782 		ASSERT(0);
3783 		start_line = 0;
3784 	}
3785 
3786 	if (tg->funcs->setup_vertical_interrupt2)
3787 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3788 }
3789 
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3790 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3791 		struct dc_link_settings *link_settings)
3792 {
3793 	struct encoder_unblank_param params = {0};
3794 	struct dc_stream_state *stream = pipe_ctx->stream;
3795 	struct dc_link *link = stream->link;
3796 	struct dce_hwseq *hws = link->dc->hwseq;
3797 
3798 	/* only the parameters set below are used by unblank */
3799 	params.timing = pipe_ctx->stream->timing;
3800 
3801 	params.link_settings.link_rate = link_settings->link_rate;
3802 
3803 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3804 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3805 			params.timing.pix_clk_100hz /= 2;
3806 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3807 	}
3808 
3809 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3810 		hws->funcs.edp_backlight_control(link, true);
3811 	}
3812 }
3813 
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3814 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3815 				const uint8_t *custom_sdp_message,
3816 				unsigned int sdp_message_size)
3817 {
3818 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3819 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3820 				pipe_ctx->stream_res.stream_enc,
3821 				custom_sdp_message,
3822 				sdp_message_size);
3823 	}
3824 }
dcn10_set_clock(struct dc * dc,enum dc_clock_type clock_type,uint32_t clk_khz,uint32_t stepping)3825 enum dc_status dcn10_set_clock(struct dc *dc,
3826 			enum dc_clock_type clock_type,
3827 			uint32_t clk_khz,
3828 			uint32_t stepping)
3829 {
3830 	struct dc_state *context = dc->current_state;
3831 	struct dc_clock_config clock_cfg = {0};
3832 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3833 
3834 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3835 		return DC_FAIL_UNSUPPORTED_1;
3836 
3837 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3838 		context, clock_type, &clock_cfg);
3839 
3840 	if (clk_khz > clock_cfg.max_clock_khz)
3841 		return DC_FAIL_CLK_EXCEED_MAX;
3842 
3843 	if (clk_khz < clock_cfg.min_clock_khz)
3844 		return DC_FAIL_CLK_BELOW_MIN;
3845 
3846 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3847 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3848 
3849 	/* update the internally requested clock for later update_clocks() use */
3850 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3851 		current_clocks->dispclk_khz = clk_khz;
3852 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3853 		current_clocks->dppclk_khz = clk_khz;
3854 	else
3855 		return DC_ERROR_UNEXPECTED;
3856 
3857 	if (dc->clk_mgr->funcs->update_clocks)
3858 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3859 				context, true);
3860 	return DC_OK;
3861 
3862 }
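
/*
 * Usage sketch (hypothetical values): dcn10_set_clock(dc,
 * DC_CLOCK_TYPE_DISPCLK, 600000, 0) requests a 600 MHz DISPCLK. The call
 * fails with DC_FAIL_CLK_EXCEED_MAX, DC_FAIL_CLK_BELOW_MIN or
 * DC_FAIL_CLK_BELOW_CFG_REQUIRED if the request falls outside what the clock
 * manager reported; otherwise the value is stored in the current context and
 * pushed through update_clocks().
 */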
3863 
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3864 void dcn10_get_clock(struct dc *dc,
3865 			enum dc_clock_type clock_type,
3866 			struct dc_clock_config *clock_cfg)
3867 {
3868 	struct dc_state *context = dc->current_state;
3869 
3870 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3871 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3872 
3873 }
3874 
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3875 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3876 {
3877 	struct resource_pool *pool = dc->res_pool;
3878 	int i;
3879 
3880 	for (i = 0; i < pool->pipe_count; i++) {
3881 		struct hubp *hubp = pool->hubps[i];
3882 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3883 
3884 		hubp->funcs->hubp_read_state(hubp);
3885 
3886 		if (!s->blank_en)
3887 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3888 	}
3889 }
3890