1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 #include <linux/delay.h>
26 
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "dm_helpers.h"
30 #include "core_types.h"
31 #include "resource.h"
32 #include "dcn20_resource.h"
33 #include "dcn20_hwseq.h"
34 #include "dce/dce_hwseq.h"
35 #include "dcn20_dsc.h"
36 #include "dcn20_optc.h"
37 #include "abm.h"
38 #include "clk_mgr.h"
39 #include "dmcu.h"
40 #include "hubp.h"
41 #include "timing_generator.h"
42 #include "opp.h"
43 #include "ipp.h"
44 #include "mpc.h"
45 #include "mcif_wb.h"
46 #include "dchubbub.h"
47 #include "reg_helper.h"
48 #include "dcn10/dcn10_cm_common.h"
49 #include "dc_link_dp.h"
50 #include "vm_helper.h"
51 #include "dccg.h"
52 #include "dc_dmub_srv.h"
53 #include "dce/dmub_hw_lock_mgr.h"
54 
55 #define DC_LOGGER_INIT(logger)
56 
57 #define CTX \
58 	hws->ctx
59 #define REG(reg)\
60 	hws->regs->reg
61 
62 #undef FN
63 #define FN(reg_name, field_name) \
64 	hws->shifts->field_name, hws->masks->field_name
65 
66 static int find_free_gsl_group(const struct dc *dc)
67 {
68 	if (dc->res_pool->gsl_groups.gsl_0 == 0)
69 		return 1;
70 	if (dc->res_pool->gsl_groups.gsl_1 == 0)
71 		return 2;
72 	if (dc->res_pool->gsl_groups.gsl_2 == 0)
73 		return 3;
74 
75 	return 0;
76 }
77 
78 /* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
79  * This is only used to lock pipes in pipe splitting case with immediate flip
80  * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
81  * so we get tearing with freesync since we cannot flip multiple pipes
82  * atomically.
83  * We use GSL for this:
84  * - immediate flip: find first available GSL group if not already assigned
85  *                   program gsl with that group, set current OTG as master
86  *                   and always us 0x4 = AND of flip_ready from all pipes
87  * - vsync flip: disable GSL if used
88  *
89  * Groups in stream_res are stored as +1 from HW registers, i.e.
90  * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
91  * Using a magic value like -1 would require tracking all inits/resets
92  */
93 static void dcn20_setup_gsl_group_as_lock(
94 		const struct dc *dc,
95 		struct pipe_ctx *pipe_ctx,
96 		bool enable)
97 {
98 	struct gsl_params gsl;
99 	int group_idx;
100 
101 	memset(&gsl, 0, sizeof(struct gsl_params));
102 
103 	if (enable) {
104 		/* return if group already assigned since GSL was set up
105 		 * for vsync flip, we would unassign so it can't be "left over"
106 		 */
107 		if (pipe_ctx->stream_res.gsl_group > 0)
108 			return;
109 
110 		group_idx = find_free_gsl_group(dc);
111 		ASSERT(group_idx != 0);
112 		pipe_ctx->stream_res.gsl_group = group_idx;
113 
114 		/* set gsl group reg field and mark resource used */
115 		switch (group_idx) {
116 		case 1:
117 			gsl.gsl0_en = 1;
118 			dc->res_pool->gsl_groups.gsl_0 = 1;
119 			break;
120 		case 2:
121 			gsl.gsl1_en = 1;
122 			dc->res_pool->gsl_groups.gsl_1 = 1;
123 			break;
124 		case 3:
125 			gsl.gsl2_en = 1;
126 			dc->res_pool->gsl_groups.gsl_2 = 1;
127 			break;
128 		default:
129 			BREAK_TO_DEBUGGER();
130 			return; // invalid case
131 		}
132 		gsl.gsl_master_en = 1;
133 	} else {
134 		group_idx = pipe_ctx->stream_res.gsl_group;
135 		if (group_idx == 0)
136 			return; // if not in use, just return
137 
138 		pipe_ctx->stream_res.gsl_group = 0;
139 
140 		/* unset gsl group reg field and mark resource free */
141 		switch (group_idx) {
142 		case 1:
143 			gsl.gsl0_en = 0;
144 			dc->res_pool->gsl_groups.gsl_0 = 0;
145 			break;
146 		case 2:
147 			gsl.gsl1_en = 0;
148 			dc->res_pool->gsl_groups.gsl_1 = 0;
149 			break;
150 		case 3:
151 			gsl.gsl2_en = 0;
152 			dc->res_pool->gsl_groups.gsl_2 = 0;
153 			break;
154 		default:
155 			BREAK_TO_DEBUGGER();
156 			return;
157 		}
158 		gsl.gsl_master_en = 0;
159 	}
160 
161 	/* at this point we want to program whether it's to enable or disable */
162 	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
163 		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
164 		pipe_ctx->stream_res.tg->funcs->set_gsl(
165 			pipe_ctx->stream_res.tg,
166 			&gsl);
167 
168 		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
169 			pipe_ctx->stream_res.tg, group_idx,	enable ? 4 : 0);
170 	} else
171 		BREAK_TO_DEBUGGER();
172 }
173 
174 void dcn20_set_flip_control_gsl(
175 		struct pipe_ctx *pipe_ctx,
176 		bool flip_immediate)
177 {
178 	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
179 		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
180 				pipe_ctx->plane_res.hubp, flip_immediate);
181 
182 }
183 
184 void dcn20_enable_power_gating_plane(
185 	struct dce_hwseq *hws,
186 	bool enable)
187 {
188 	bool force_on = true; /* disable power gating */
189 
190 	if (enable)
191 		force_on = false;
192 
193 	/* DCHUBP0/1/2/3/4/5 */
194 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
195 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
196 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
197 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
198 	if (REG(DOMAIN8_PG_CONFIG))
199 		REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
200 	if (REG(DOMAIN10_PG_CONFIG))
201 		REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
202 
203 	/* DPP0/1/2/3/4/5 */
204 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
205 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
206 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
207 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
208 	if (REG(DOMAIN9_PG_CONFIG))
209 		REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
210 	if (REG(DOMAIN11_PG_CONFIG))
211 		REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
212 
213 	/* DCS0/1/2/3/4/5 */
214 	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
215 	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
216 	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
217 	if (REG(DOMAIN19_PG_CONFIG))
218 		REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
219 	if (REG(DOMAIN20_PG_CONFIG))
220 		REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
221 	if (REG(DOMAIN21_PG_CONFIG))
222 		REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
223 }
224 
225 void dcn20_dccg_init(struct dce_hwseq *hws)
226 {
227 	/*
228 	 * set MICROSECOND_TIME_BASE_DIV
229 	 * 100Mhz refclk -> 0x120264
230 	 * 27Mhz refclk -> 0x12021b
231 	 * 48Mhz refclk -> 0x120230
232 	 *
233 	 */
234 	REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);
235 
236 	/*
237 	 * set MILLISECOND_TIME_BASE_DIV
238 	 * 100Mhz refclk -> 0x1186a0
239 	 * 27Mhz refclk -> 0x106978
240 	 * 48Mhz refclk -> 0x10bb80
241 	 *
242 	 */
243 	REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);
244 
245 	/* This value is dependent on the hardware pipeline delay so set once per SOC */
246 	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
247 }
248 
249 void dcn20_disable_vga(
250 	struct dce_hwseq *hws)
251 {
252 	REG_WRITE(D1VGA_CONTROL, 0);
253 	REG_WRITE(D2VGA_CONTROL, 0);
254 	REG_WRITE(D3VGA_CONTROL, 0);
255 	REG_WRITE(D4VGA_CONTROL, 0);
256 	REG_WRITE(D5VGA_CONTROL, 0);
257 	REG_WRITE(D6VGA_CONTROL, 0);
258 }
259 
260 void dcn20_program_triple_buffer(
261 	const struct dc *dc,
262 	struct pipe_ctx *pipe_ctx,
263 	bool enable_triple_buffer)
264 {
265 	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
266 		pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
267 			pipe_ctx->plane_res.hubp,
268 			enable_triple_buffer);
269 	}
270 }
271 
272 /* Blank pixel data during initialization */
273 void dcn20_init_blank(
274 		struct dc *dc,
275 		struct timing_generator *tg)
276 {
277 	struct dce_hwseq *hws = dc->hwseq;
278 	enum dc_color_space color_space;
279 	struct tg_color black_color = {0};
280 	struct output_pixel_processor *opp = NULL;
281 	struct output_pixel_processor *bottom_opp = NULL;
282 	uint32_t num_opps, opp_id_src0, opp_id_src1;
283 	uint32_t otg_active_width, otg_active_height;
284 
285 	/* program opp dpg blank color */
286 	color_space = COLOR_SPACE_SRGB;
287 	color_space_to_black_color(dc, color_space, &black_color);
288 
289 	/* get the OTG active size */
290 	tg->funcs->get_otg_active_size(tg,
291 			&otg_active_width,
292 			&otg_active_height);
293 
294 	/* get the OPTC source */
295 	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
296 
297 	if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
298 		ASSERT(false);
299 		return;
300 	}
301 	opp = dc->res_pool->opps[opp_id_src0];
302 
303 	if (num_opps == 2) {
304 		otg_active_width = otg_active_width / 2;
305 
306 		if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
307 			ASSERT(false);
308 			return;
309 		}
310 		bottom_opp = dc->res_pool->opps[opp_id_src1];
311 	}
312 
313 	opp->funcs->opp_set_disp_pattern_generator(
314 			opp,
315 			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
316 			CONTROLLER_DP_COLOR_SPACE_UDEFINED,
317 			COLOR_DEPTH_UNDEFINED,
318 			&black_color,
319 			otg_active_width,
320 			otg_active_height,
321 			0);
322 
323 	if (num_opps == 2) {
324 		bottom_opp->funcs->opp_set_disp_pattern_generator(
325 				bottom_opp,
326 				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
327 				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
328 				COLOR_DEPTH_UNDEFINED,
329 				&black_color,
330 				otg_active_width,
331 				otg_active_height,
332 				0);
333 	}
334 
335 	hws->funcs.wait_for_blank_complete(opp);
336 }
337 
338 void dcn20_dsc_pg_control(
339 		struct dce_hwseq *hws,
340 		unsigned int dsc_inst,
341 		bool power_on)
342 {
343 	uint32_t power_gate = power_on ? 0 : 1;
344 	uint32_t pwr_status = power_on ? 0 : 2;
345 	uint32_t org_ip_request_cntl = 0;
346 
347 	if (hws->ctx->dc->debug.disable_dsc_power_gate)
348 		return;
349 
350 	if (REG(DOMAIN16_PG_CONFIG) == 0)
351 		return;
352 
353 	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
354 	if (org_ip_request_cntl == 0)
355 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
356 
357 	switch (dsc_inst) {
358 	case 0: /* DSC0 */
359 		REG_UPDATE(DOMAIN16_PG_CONFIG,
360 				DOMAIN16_POWER_GATE, power_gate);
361 
362 		REG_WAIT(DOMAIN16_PG_STATUS,
363 				DOMAIN16_PGFSM_PWR_STATUS, pwr_status,
364 				1, 1000);
365 		break;
366 	case 1: /* DSC1 */
367 		REG_UPDATE(DOMAIN17_PG_CONFIG,
368 				DOMAIN17_POWER_GATE, power_gate);
369 
370 		REG_WAIT(DOMAIN17_PG_STATUS,
371 				DOMAIN17_PGFSM_PWR_STATUS, pwr_status,
372 				1, 1000);
373 		break;
374 	case 2: /* DSC2 */
375 		REG_UPDATE(DOMAIN18_PG_CONFIG,
376 				DOMAIN18_POWER_GATE, power_gate);
377 
378 		REG_WAIT(DOMAIN18_PG_STATUS,
379 				DOMAIN18_PGFSM_PWR_STATUS, pwr_status,
380 				1, 1000);
381 		break;
382 	case 3: /* DSC3 */
383 		REG_UPDATE(DOMAIN19_PG_CONFIG,
384 				DOMAIN19_POWER_GATE, power_gate);
385 
386 		REG_WAIT(DOMAIN19_PG_STATUS,
387 				DOMAIN19_PGFSM_PWR_STATUS, pwr_status,
388 				1, 1000);
389 		break;
390 	case 4: /* DSC4 */
391 		REG_UPDATE(DOMAIN20_PG_CONFIG,
392 				DOMAIN20_POWER_GATE, power_gate);
393 
394 		REG_WAIT(DOMAIN20_PG_STATUS,
395 				DOMAIN20_PGFSM_PWR_STATUS, pwr_status,
396 				1, 1000);
397 		break;
398 	case 5: /* DSC5 */
399 		REG_UPDATE(DOMAIN21_PG_CONFIG,
400 				DOMAIN21_POWER_GATE, power_gate);
401 
402 		REG_WAIT(DOMAIN21_PG_STATUS,
403 				DOMAIN21_PGFSM_PWR_STATUS, pwr_status,
404 				1, 1000);
405 		break;
406 	default:
407 		BREAK_TO_DEBUGGER();
408 		break;
409 	}
410 
411 	if (org_ip_request_cntl == 0)
412 		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
413 }
414 
415 void dcn20_dpp_pg_control(
416 		struct dce_hwseq *hws,
417 		unsigned int dpp_inst,
418 		bool power_on)
419 {
420 	uint32_t power_gate = power_on ? 0 : 1;
421 	uint32_t pwr_status = power_on ? 0 : 2;
422 
423 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
424 		return;
425 	if (REG(DOMAIN1_PG_CONFIG) == 0)
426 		return;
427 
428 	switch (dpp_inst) {
429 	case 0: /* DPP0 */
430 		REG_UPDATE(DOMAIN1_PG_CONFIG,
431 				DOMAIN1_POWER_GATE, power_gate);
432 
433 		REG_WAIT(DOMAIN1_PG_STATUS,
434 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
435 				1, 1000);
436 		break;
437 	case 1: /* DPP1 */
438 		REG_UPDATE(DOMAIN3_PG_CONFIG,
439 				DOMAIN3_POWER_GATE, power_gate);
440 
441 		REG_WAIT(DOMAIN3_PG_STATUS,
442 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
443 				1, 1000);
444 		break;
445 	case 2: /* DPP2 */
446 		REG_UPDATE(DOMAIN5_PG_CONFIG,
447 				DOMAIN5_POWER_GATE, power_gate);
448 
449 		REG_WAIT(DOMAIN5_PG_STATUS,
450 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
451 				1, 1000);
452 		break;
453 	case 3: /* DPP3 */
454 		REG_UPDATE(DOMAIN7_PG_CONFIG,
455 				DOMAIN7_POWER_GATE, power_gate);
456 
457 		REG_WAIT(DOMAIN7_PG_STATUS,
458 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
459 				1, 1000);
460 		break;
461 	case 4: /* DPP4 */
462 		REG_UPDATE(DOMAIN9_PG_CONFIG,
463 				DOMAIN9_POWER_GATE, power_gate);
464 
465 		REG_WAIT(DOMAIN9_PG_STATUS,
466 				DOMAIN9_PGFSM_PWR_STATUS, pwr_status,
467 				1, 1000);
468 		break;
469 	case 5: /* DPP5 */
470 		/*
471 		 * Do not power gate DPP5, should be left at HW default, power on permanently.
472 		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
473 		 * reset.
474 		 * REG_UPDATE(DOMAIN11_PG_CONFIG,
475 		 *		DOMAIN11_POWER_GATE, power_gate);
476 		 *
477 		 * REG_WAIT(DOMAIN11_PG_STATUS,
478 		 *		DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
479 		 * 		1, 1000);
480 		 */
481 		break;
482 	default:
483 		BREAK_TO_DEBUGGER();
484 		break;
485 	}
486 }
487 
488 
489 void dcn20_hubp_pg_control(
490 		struct dce_hwseq *hws,
491 		unsigned int hubp_inst,
492 		bool power_on)
493 {
494 	uint32_t power_gate = power_on ? 0 : 1;
495 	uint32_t pwr_status = power_on ? 0 : 2;
496 
497 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
498 		return;
499 	if (REG(DOMAIN0_PG_CONFIG) == 0)
500 		return;
501 
502 	switch (hubp_inst) {
503 	case 0: /* DCHUBP0 */
504 		REG_UPDATE(DOMAIN0_PG_CONFIG,
505 				DOMAIN0_POWER_GATE, power_gate);
506 
507 		REG_WAIT(DOMAIN0_PG_STATUS,
508 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
509 				1, 1000);
510 		break;
511 	case 1: /* DCHUBP1 */
512 		REG_UPDATE(DOMAIN2_PG_CONFIG,
513 				DOMAIN2_POWER_GATE, power_gate);
514 
515 		REG_WAIT(DOMAIN2_PG_STATUS,
516 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
517 				1, 1000);
518 		break;
519 	case 2: /* DCHUBP2 */
520 		REG_UPDATE(DOMAIN4_PG_CONFIG,
521 				DOMAIN4_POWER_GATE, power_gate);
522 
523 		REG_WAIT(DOMAIN4_PG_STATUS,
524 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
525 				1, 1000);
526 		break;
527 	case 3: /* DCHUBP3 */
528 		REG_UPDATE(DOMAIN6_PG_CONFIG,
529 				DOMAIN6_POWER_GATE, power_gate);
530 
531 		REG_WAIT(DOMAIN6_PG_STATUS,
532 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
533 				1, 1000);
534 		break;
535 	case 4: /* DCHUBP4 */
536 		REG_UPDATE(DOMAIN8_PG_CONFIG,
537 				DOMAIN8_POWER_GATE, power_gate);
538 
539 		REG_WAIT(DOMAIN8_PG_STATUS,
540 				DOMAIN8_PGFSM_PWR_STATUS, pwr_status,
541 				1, 1000);
542 		break;
543 	case 5: /* DCHUBP5 */
544 		/*
545 		 * Do not power gate DCHUB5, should be left at HW default, power on permanently.
546 		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
547 		 * reset.
548 		 * REG_UPDATE(DOMAIN10_PG_CONFIG,
549 		 *		DOMAIN10_POWER_GATE, power_gate);
550 		 *
551 		 * REG_WAIT(DOMAIN10_PG_STATUS,
552 		 *		DOMAIN10_PGFSM_PWR_STATUS, pwr_status,
553 		 *		1, 1000);
554 		 */
555 		break;
556 	default:
557 		BREAK_TO_DEBUGGER();
558 		break;
559 	}
560 }
561 
562 
563 /* disable HW used by plane.
564  * note:  cannot disable until disconnect is complete
565  */
566 void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
567 {
568 	struct dce_hwseq *hws = dc->hwseq;
569 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
570 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
571 
572 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
573 
574 	/* In flip immediate with pipe splitting case GSL is used for
575 	 * synchronization so we must disable it when the plane is disabled.
576 	 */
577 	if (pipe_ctx->stream_res.gsl_group != 0)
578 		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
579 
580 	dc->hwss.set_flip_control_gsl(pipe_ctx, false);
581 
582 	hubp->funcs->hubp_clk_cntl(hubp, false);
583 
584 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
585 
586 	hubp->power_gated = true;
587 
588 	hws->funcs.plane_atomic_power_down(dc,
589 			pipe_ctx->plane_res.dpp,
590 			pipe_ctx->plane_res.hubp);
591 
592 	pipe_ctx->stream = NULL;
593 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
594 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
595 	pipe_ctx->top_pipe = NULL;
596 	pipe_ctx->bottom_pipe = NULL;
597 	pipe_ctx->plane_state = NULL;
598 }
599 
600 
601 void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
602 {
603 	DC_LOGGER_INIT(dc->ctx->logger);
604 
605 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
606 		return;
607 
608 	dcn20_plane_atomic_disable(dc, pipe_ctx);
609 
610 	DC_LOG_DC("Power down front end %d\n",
611 					pipe_ctx->pipe_idx);
612 }
613 
614 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
615 static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
616 		int opp_cnt)
617 {
618 	bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
619 	int flow_ctrl_cnt;
620 
621 	if (opp_cnt >= 2)
622 		hblank_halved = true;
623 
624 	flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
625 			stream->timing.h_border_left -
626 			stream->timing.h_border_right;
627 
628 	if (hblank_halved)
629 		flow_ctrl_cnt /= 2;
630 
631 	/* ODM combine 4:1 case */
632 	if (opp_cnt == 4)
633 		flow_ctrl_cnt /= 2;
634 
635 	return flow_ctrl_cnt;
636 }
637 #endif
638 
639 enum dc_status dcn20_enable_stream_timing(
640 		struct pipe_ctx *pipe_ctx,
641 		struct dc_state *context,
642 		struct dc *dc)
643 {
644 	struct dce_hwseq *hws = dc->hwseq;
645 	struct dc_stream_state *stream = pipe_ctx->stream;
646 	struct drr_params params = {0};
647 	unsigned int event_triggers = 0;
648 	struct pipe_ctx *odm_pipe;
649 	int opp_cnt = 1;
650 	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
651 
652 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
653 	bool interlace = stream->timing.flags.INTERLACE;
654 	int i;
655 
656 	struct mpc_dwb_flow_control flow_control;
657 	struct mpc *mpc = dc->res_pool->mpc;
658 	bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));
659 
660 #endif
661 	/* by upper caller loop, pipe0 is parent pipe and be called first.
662 	 * back end is set up by for pipe0. Other children pipe share back end
663 	 * with pipe 0. No program is needed.
664 	 */
665 	if (pipe_ctx->top_pipe != NULL)
666 		return DC_OK;
667 
668 	/* TODO check if timing_changed, disable stream if timing changed */
669 
670 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
671 		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
672 		opp_cnt++;
673 	}
674 
675 	if (opp_cnt > 1)
676 		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
677 				pipe_ctx->stream_res.tg,
678 				opp_inst, opp_cnt,
679 				&pipe_ctx->stream->timing);
680 
681 	/* HW program guide assume display already disable
682 	 * by unplug sequence. OTG assume stop.
683 	 */
684 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
685 
686 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
687 			pipe_ctx->clock_source,
688 			&pipe_ctx->stream_res.pix_clk_params,
689 			&pipe_ctx->pll_settings)) {
690 		BREAK_TO_DEBUGGER();
691 		return DC_ERROR_UNEXPECTED;
692 	}
693 
694 	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
695 		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
696 
697 	pipe_ctx->stream_res.tg->funcs->program_timing(
698 			pipe_ctx->stream_res.tg,
699 			&stream->timing,
700 			pipe_ctx->pipe_dlg_param.vready_offset,
701 			pipe_ctx->pipe_dlg_param.vstartup_start,
702 			pipe_ctx->pipe_dlg_param.vupdate_offset,
703 			pipe_ctx->pipe_dlg_param.vupdate_width,
704 			pipe_ctx->stream->signal,
705 			true);
706 
707 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
708 	rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
709 	flow_control.flow_ctrl_mode = 0;
710 	flow_control.flow_ctrl_cnt0 = 0x80;
711 	flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
712 	if (mpc->funcs->set_out_rate_control) {
713 		for (i = 0; i < opp_cnt; ++i) {
714 			mpc->funcs->set_out_rate_control(
715 					mpc, opp_inst[i],
716 					true,
717 					rate_control_2x_pclk,
718 					&flow_control);
719 		}
720 	}
721 #endif
722 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
723 		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
724 				odm_pipe->stream_res.opp,
725 				true);
726 
727 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
728 			pipe_ctx->stream_res.opp,
729 			true);
730 
731 	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);
732 
733 	/* VTG is  within DCHUB command block. DCFCLK is always on */
734 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
735 		BREAK_TO_DEBUGGER();
736 		return DC_ERROR_UNEXPECTED;
737 	}
738 
739 	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
740 
741 	params.vertical_total_min = stream->adjust.v_total_min;
742 	params.vertical_total_max = stream->adjust.v_total_max;
743 	params.vertical_total_mid = stream->adjust.v_total_mid;
744 	params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
745 	if (pipe_ctx->stream_res.tg->funcs->set_drr)
746 		pipe_ctx->stream_res.tg->funcs->set_drr(
747 			pipe_ctx->stream_res.tg, &params);
748 
749 	// DRR should set trigger event to monitor surface update event
750 	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
751 		event_triggers = 0x80;
752 	/* Event triggers and num frames initialized for DRR, but can be
753 	 * later updated for PSR use. Note DRR trigger events are generated
754 	 * regardless of whether num frames met.
755 	 */
756 	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
757 		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
758 				pipe_ctx->stream_res.tg, event_triggers, 2);
759 
760 	/* TODO program crtc source select for non-virtual signal*/
761 	/* TODO program FMT */
762 	/* TODO setup link_enc */
763 	/* TODO set stream attributes */
764 	/* TODO program audio */
765 	/* TODO enable stream if timing changed */
766 	/* TODO unblank stream if DP */
767 
768 	return DC_OK;
769 }
770 
771 void dcn20_program_output_csc(struct dc *dc,
772 		struct pipe_ctx *pipe_ctx,
773 		enum dc_color_space colorspace,
774 		uint16_t *matrix,
775 		int opp_id)
776 {
777 	struct mpc *mpc = dc->res_pool->mpc;
778 	enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
779 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
780 
781 	if (mpc->funcs->power_on_mpc_mem_pwr)
782 		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
783 
784 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
785 		if (mpc->funcs->set_output_csc != NULL)
786 			mpc->funcs->set_output_csc(mpc,
787 					opp_id,
788 					matrix,
789 					ocsc_mode);
790 	} else {
791 		if (mpc->funcs->set_ocsc_default != NULL)
792 			mpc->funcs->set_ocsc_default(mpc,
793 					opp_id,
794 					colorspace,
795 					ocsc_mode);
796 	}
797 }
798 
799 bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
800 				const struct dc_stream_state *stream)
801 {
802 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
803 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
804 	struct pwl_params *params = NULL;
805 	/*
806 	 * program OGAM only for the top pipe
807 	 * if there is a pipe split then fix diagnostic is required:
808 	 * how to pass OGAM parameter for stream.
809 	 * if programming for all pipes is required then remove condition
810 	 * pipe_ctx->top_pipe == NULL ,but then fix the diagnostic.
811 	 */
812 	if (mpc->funcs->power_on_mpc_mem_pwr)
813 		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
814 	if (pipe_ctx->top_pipe == NULL
815 			&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
816 		if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
817 			params = &stream->out_transfer_func->pwl;
818 		else if (pipe_ctx->stream->out_transfer_func->type ==
819 			TF_TYPE_DISTRIBUTED_POINTS &&
820 			cm_helper_translate_curve_to_hw_format(
821 			stream->out_transfer_func,
822 			&mpc->blender_params, false))
823 			params = &mpc->blender_params;
824 		/*
825 		 * there is no ROM
826 		 */
827 		if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
828 			BREAK_TO_DEBUGGER();
829 	}
830 	/*
831 	 * if above if is not executed then 'params' equal to 0 and set in bypass
832 	 */
833 	mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
834 
835 	return true;
836 }
837 
838 bool dcn20_set_blend_lut(
839 	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
840 {
841 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
842 	bool result = true;
843 	struct pwl_params *blend_lut = NULL;
844 
845 	if (plane_state->blend_tf) {
846 		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
847 			blend_lut = &plane_state->blend_tf->pwl;
848 		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
849 			cm_helper_translate_curve_to_hw_format(
850 					plane_state->blend_tf,
851 					&dpp_base->regamma_params, false);
852 			blend_lut = &dpp_base->regamma_params;
853 		}
854 	}
855 	result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);
856 
857 	return result;
858 }
859 
860 bool dcn20_set_shaper_3dlut(
861 	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
862 {
863 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
864 	bool result = true;
865 	struct pwl_params *shaper_lut = NULL;
866 
867 	if (plane_state->in_shaper_func) {
868 		if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
869 			shaper_lut = &plane_state->in_shaper_func->pwl;
870 		else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
871 			cm_helper_translate_curve_to_hw_format(
872 					plane_state->in_shaper_func,
873 					&dpp_base->shaper_params, true);
874 			shaper_lut = &dpp_base->shaper_params;
875 		}
876 	}
877 
878 	result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
879 	if (plane_state->lut3d_func &&
880 		plane_state->lut3d_func->state.bits.initialized == 1)
881 		result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
882 								&plane_state->lut3d_func->lut_3d);
883 	else
884 		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);
885 
886 	return result;
887 }
888 
889 bool dcn20_set_input_transfer_func(struct dc *dc,
890 				struct pipe_ctx *pipe_ctx,
891 				const struct dc_plane_state *plane_state)
892 {
893 	struct dce_hwseq *hws = dc->hwseq;
894 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
895 	const struct dc_transfer_func *tf = NULL;
896 	bool result = true;
897 	bool use_degamma_ram = false;
898 
899 	if (dpp_base == NULL || plane_state == NULL)
900 		return false;
901 
902 	hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
903 	hws->funcs.set_blend_lut(pipe_ctx, plane_state);
904 
905 	if (plane_state->in_transfer_func)
906 		tf = plane_state->in_transfer_func;
907 
908 
909 	if (tf == NULL) {
910 		dpp_base->funcs->dpp_set_degamma(dpp_base,
911 				IPP_DEGAMMA_MODE_BYPASS);
912 		return true;
913 	}
914 
915 	if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
916 		use_degamma_ram = true;
917 
918 	if (use_degamma_ram == true) {
919 		if (tf->type == TF_TYPE_HWPWL)
920 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
921 					&tf->pwl);
922 		else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
923 			cm_helper_translate_curve_to_degamma_hw_format(tf,
924 					&dpp_base->degamma_params);
925 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
926 				&dpp_base->degamma_params);
927 		}
928 		return true;
929 	}
930 	/* handle here the optimized cases when de-gamma ROM could be used.
931 	 *
932 	 */
933 	if (tf->type == TF_TYPE_PREDEFINED) {
934 		switch (tf->tf) {
935 		case TRANSFER_FUNCTION_SRGB:
936 			dpp_base->funcs->dpp_set_degamma(dpp_base,
937 					IPP_DEGAMMA_MODE_HW_sRGB);
938 			break;
939 		case TRANSFER_FUNCTION_BT709:
940 			dpp_base->funcs->dpp_set_degamma(dpp_base,
941 					IPP_DEGAMMA_MODE_HW_xvYCC);
942 			break;
943 		case TRANSFER_FUNCTION_LINEAR:
944 			dpp_base->funcs->dpp_set_degamma(dpp_base,
945 					IPP_DEGAMMA_MODE_BYPASS);
946 			break;
947 		case TRANSFER_FUNCTION_PQ:
948 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
949 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
950 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
951 			result = true;
952 			break;
953 		default:
954 			result = false;
955 			break;
956 		}
957 	} else if (tf->type == TF_TYPE_BYPASS)
958 		dpp_base->funcs->dpp_set_degamma(dpp_base,
959 				IPP_DEGAMMA_MODE_BYPASS);
960 	else {
961 		/*
962 		 * if we are here, we did not handle correctly.
963 		 * fix is required for this use case
964 		 */
965 		BREAK_TO_DEBUGGER();
966 		dpp_base->funcs->dpp_set_degamma(dpp_base,
967 				IPP_DEGAMMA_MODE_BYPASS);
968 	}
969 
970 	return result;
971 }
972 
973 void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
974 {
975 	struct pipe_ctx *odm_pipe;
976 	int opp_cnt = 1;
977 	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
978 
979 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
980 		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
981 		opp_cnt++;
982 	}
983 
984 	if (opp_cnt > 1)
985 		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
986 				pipe_ctx->stream_res.tg,
987 				opp_inst, opp_cnt,
988 				&pipe_ctx->stream->timing);
989 	else
990 		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
991 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
992 }
993 
994 void dcn20_blank_pixel_data(
995 		struct dc *dc,
996 		struct pipe_ctx *pipe_ctx,
997 		bool blank)
998 {
999 	struct tg_color black_color = {0};
1000 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
1001 	struct dc_stream_state *stream = pipe_ctx->stream;
1002 	enum dc_color_space color_space = stream->output_color_space;
1003 	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
1004 	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
1005 	struct pipe_ctx *odm_pipe;
1006 	int odm_cnt = 1;
1007 
1008 	int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
1009 	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
1010 
1011 	if (stream->link->test_pattern_enabled)
1012 		return;
1013 
1014 	/* get opp dpg blank color */
1015 	color_space_to_black_color(dc, color_space, &black_color);
1016 
1017 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
1018 		odm_cnt++;
1019 
1020 	width = width / odm_cnt;
1021 
1022 	if (blank) {
1023 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
1024 
1025 		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
1026 			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
1027 			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
1028 		}
1029 	} else {
1030 		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
1031 	}
1032 
1033 	stream_res->opp->funcs->opp_set_disp_pattern_generator(
1034 			stream_res->opp,
1035 			test_pattern,
1036 			test_pattern_color_space,
1037 			stream->timing.display_color_depth,
1038 			&black_color,
1039 			width,
1040 			height,
1041 			0);
1042 
1043 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
1044 		odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
1045 				odm_pipe->stream_res.opp,
1046 				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
1047 						CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
1048 				test_pattern_color_space,
1049 				stream->timing.display_color_depth,
1050 				&black_color,
1051 				width,
1052 				height,
1053 				0);
1054 	}
1055 
1056 	if (!blank)
1057 		if (stream_res->abm) {
1058 			dc->hwss.set_pipe(pipe_ctx);
1059 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
1060 		}
1061 }
1062 
1063 
1064 static void dcn20_power_on_plane(
1065 	struct dce_hwseq *hws,
1066 	struct pipe_ctx *pipe_ctx)
1067 {
1068 	DC_LOGGER_INIT(hws->ctx->logger);
1069 	if (REG(DC_IP_REQUEST_CNTL)) {
1070 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1071 				IP_REQUEST_EN, 1);
1072 		dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
1073 		dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
1074 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1075 				IP_REQUEST_EN, 0);
1076 		DC_LOG_DEBUG(
1077 				"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
1078 	}
1079 }
1080 
1081 void dcn20_enable_plane(
1082 	struct dc *dc,
1083 	struct pipe_ctx *pipe_ctx,
1084 	struct dc_state *context)
1085 {
1086 	//if (dc->debug.sanity_checks) {
1087 	//	dcn10_verify_allow_pstate_change_high(dc);
1088 	//}
1089 	dcn20_power_on_plane(dc->hwseq, pipe_ctx);
1090 
1091 	/* enable DCFCLK current DCHUB */
1092 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1093 
1094 	/* initialize HUBP on power up */
1095 	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
1096 
1097 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
1098 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1099 			pipe_ctx->stream_res.opp,
1100 			true);
1101 
1102 /* TODO: enable/disable in dm as per update type.
1103 	if (plane_state) {
1104 		DC_LOG_DC(dc->ctx->logger,
1105 				"Pipe:%d 0x%x: addr hi:0x%x, "
1106 				"addr low:0x%x, "
1107 				"src: %d, %d, %d,"
1108 				" %d; dst: %d, %d, %d, %d;\n",
1109 				pipe_ctx->pipe_idx,
1110 				plane_state,
1111 				plane_state->address.grph.addr.high_part,
1112 				plane_state->address.grph.addr.low_part,
1113 				plane_state->src_rect.x,
1114 				plane_state->src_rect.y,
1115 				plane_state->src_rect.width,
1116 				plane_state->src_rect.height,
1117 				plane_state->dst_rect.x,
1118 				plane_state->dst_rect.y,
1119 				plane_state->dst_rect.width,
1120 				plane_state->dst_rect.height);
1121 
1122 		DC_LOG_DC(dc->ctx->logger,
1123 				"Pipe %d: width, height, x, y         format:%d\n"
1124 				"viewport:%d, %d, %d, %d\n"
1125 				"recout:  %d, %d, %d, %d\n",
1126 				pipe_ctx->pipe_idx,
1127 				plane_state->format,
1128 				pipe_ctx->plane_res.scl_data.viewport.width,
1129 				pipe_ctx->plane_res.scl_data.viewport.height,
1130 				pipe_ctx->plane_res.scl_data.viewport.x,
1131 				pipe_ctx->plane_res.scl_data.viewport.y,
1132 				pipe_ctx->plane_res.scl_data.recout.width,
1133 				pipe_ctx->plane_res.scl_data.recout.height,
1134 				pipe_ctx->plane_res.scl_data.recout.x,
1135 				pipe_ctx->plane_res.scl_data.recout.y);
1136 		print_rq_dlg_ttu(dc, pipe_ctx);
1137 	}
1138 */
1139 	if (dc->vm_pa_config.valid) {
1140 		struct vm_system_aperture_param apt;
1141 
1142 		apt.sys_default.quad_part = 0;
1143 
1144 		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
1145 		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
1146 
1147 		// Program system aperture settings
1148 		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
1149 	}
1150 
1151 //	if (dc->debug.sanity_checks) {
1152 //		dcn10_verify_allow_pstate_change_high(dc);
1153 //	}
1154 }
1155 
1156 void dcn20_pipe_control_lock(
1157 	struct dc *dc,
1158 	struct pipe_ctx *pipe,
1159 	bool lock)
1160 {
1161 	bool flip_immediate = false;
1162 
1163 	/* use TG master update lock to lock everything on the TG
1164 	 * therefore only top pipe need to lock
1165 	 */
1166 	if (!pipe || pipe->top_pipe)
1167 		return;
1168 
1169 	if (pipe->plane_state != NULL)
1170 		flip_immediate = pipe->plane_state->flip_immediate;
1171 
1172 	if (flip_immediate && lock) {
1173 		const int TIMEOUT_FOR_FLIP_PENDING = 100000;
1174 		int i;
1175 
1176 		for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
1177 			if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
1178 				break;
1179 			udelay(1);
1180 		}
1181 
1182 		if (pipe->bottom_pipe != NULL) {
1183 			for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
1184 				if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
1185 					break;
1186 				udelay(1);
1187 			}
1188 		}
1189 	}
1190 
1191 	/* In flip immediate and pipe splitting case, we need to use GSL
1192 	 * for synchronization. Only do setup on locking and on flip type change.
1193 	 */
1194 	if (lock && pipe->bottom_pipe != NULL)
1195 		if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
1196 		    (!flip_immediate && pipe->stream_res.gsl_group > 0))
1197 			dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);
1198 
1199 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1200 		union dmub_hw_lock_flags hw_locks = { 0 };
1201 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1202 
1203 		hw_locks.bits.lock_pipe = 1;
1204 		inst_flags.otg_inst =  pipe->stream_res.tg->inst;
1205 
1206 		if (pipe->plane_state != NULL)
1207 			hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;
1208 
1209 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1210 					lock,
1211 					&hw_locks,
1212 					&inst_flags);
1213 	} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
1214 		if (lock)
1215 			pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
1216 		else
1217 			pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
1218 	} else {
1219 		if (lock)
1220 			pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1221 		else
1222 			pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1223 	}
1224 }
1225 
1226 static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
1227 {
1228 	new_pipe->update_flags.raw = 0;
1229 
1230 	/* Exit on unchanged, unused pipe */
1231 	if (!old_pipe->plane_state && !new_pipe->plane_state)
1232 		return;
1233 	/* Detect pipe enable/disable */
1234 	if (!old_pipe->plane_state && new_pipe->plane_state) {
1235 		new_pipe->update_flags.bits.enable = 1;
1236 		new_pipe->update_flags.bits.mpcc = 1;
1237 		new_pipe->update_flags.bits.dppclk = 1;
1238 		new_pipe->update_flags.bits.hubp_interdependent = 1;
1239 		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
1240 		new_pipe->update_flags.bits.gamut_remap = 1;
1241 		new_pipe->update_flags.bits.scaler = 1;
1242 		new_pipe->update_flags.bits.viewport = 1;
1243 		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
1244 			new_pipe->update_flags.bits.odm = 1;
1245 			new_pipe->update_flags.bits.global_sync = 1;
1246 		}
1247 		return;
1248 	}
1249 	if (old_pipe->plane_state && !new_pipe->plane_state) {
1250 		new_pipe->update_flags.bits.disable = 1;
1251 		return;
1252 	}
1253 
1254 	/* Detect plane change */
1255 	if (old_pipe->plane_state != new_pipe->plane_state) {
1256 		new_pipe->update_flags.bits.plane_changed = true;
1257 	}
1258 
1259 	/* Detect top pipe only changes */
1260 	if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
1261 		/* Detect odm changes */
1262 		if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
1263 			&& old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
1264 				|| (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
1265 				|| (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
1266 				|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
1267 			new_pipe->update_flags.bits.odm = 1;
1268 
1269 		/* Detect global sync changes */
1270 		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
1271 				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
1272 				|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
1273 				|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
1274 			new_pipe->update_flags.bits.global_sync = 1;
1275 	}
1276 
1277 	/*
1278 	 * Detect opp / tg change, only set on change, not on enable
1279 	 * Assume mpcc inst = pipe index, if not this code needs to be updated
1280 	 * since mpcc is what is affected by these. In fact all of our sequence
1281 	 * makes this assumption at the moment with how hubp reset is matched to
1282 	 * same index mpcc reset.
1283 	 */
1284 	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
1285 		new_pipe->update_flags.bits.opp_changed = 1;
1286 	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
1287 		new_pipe->update_flags.bits.tg_changed = 1;
1288 
1289 	/*
1290 	 * Detect mpcc blending changes, only dpp inst and opp matter here,
1291 	 * mpccs getting removed/inserted update connected ones during their own
1292 	 * programming
1293 	 */
1294 	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
1295 			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
1296 		new_pipe->update_flags.bits.mpcc = 1;
1297 
1298 	/* Detect dppclk change */
1299 	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
1300 		new_pipe->update_flags.bits.dppclk = 1;
1301 
1302 	/* Check for scl update */
1303 	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
1304 			new_pipe->update_flags.bits.scaler = 1;
1305 	/* Check for vp update */
1306 	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
1307 			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
1308 				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
1309 		new_pipe->update_flags.bits.viewport = 1;
1310 
1311 	/* Detect dlg/ttu/rq updates */
1312 	{
1313 		struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
1314 		struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
1315 		struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
1316 		struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;
1317 
1318 		/* Detect pipe interdependent updates */
1319 		if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
1320 				old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
1321 				old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
1322 				old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
1323 				old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
1324 				old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
1325 				old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
1326 				old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
1327 				old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
1328 				old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
1329 				old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
1330 				old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
1331 				old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
1332 				old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
1333 				old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
1334 				old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
1335 				old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
1336 				old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
1337 			old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
1338 			old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
1339 			old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
1340 			old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
1341 			old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
1342 			old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
1343 			old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
1344 			old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
1345 			old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
1346 			old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
1347 			old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
1348 			old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
1349 			old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
1350 			old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
1351 			old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
1352 			old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
1353 			old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
1354 			old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
1355 			new_pipe->update_flags.bits.hubp_interdependent = 1;
1356 		}
1357 		/* Detect any other updates to ttu/rq/dlg */
1358 		if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
1359 				memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
1360 				memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
1361 			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
1362 	}
1363 }
1364 
1365 static void dcn20_update_dchubp_dpp(
1366 	struct dc *dc,
1367 	struct pipe_ctx *pipe_ctx,
1368 	struct dc_state *context)
1369 {
1370 	struct dce_hwseq *hws = dc->hwseq;
1371 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1372 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1373 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1374 	bool viewport_changed = false;
1375 
1376 	if (pipe_ctx->update_flags.bits.dppclk)
1377 		dpp->funcs->dpp_dppclk_control(dpp, false, true);
1378 
1379 	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
1380 	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
1381 	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
1382 	 */
1383 	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
1384 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
1385 
1386 		hubp->funcs->hubp_setup(
1387 			hubp,
1388 			&pipe_ctx->dlg_regs,
1389 			&pipe_ctx->ttu_regs,
1390 			&pipe_ctx->rq_regs,
1391 			&pipe_ctx->pipe_dlg_param);
1392 	}
1393 	if (pipe_ctx->update_flags.bits.hubp_interdependent)
1394 		hubp->funcs->hubp_setup_interdependent(
1395 			hubp,
1396 			&pipe_ctx->dlg_regs,
1397 			&pipe_ctx->ttu_regs);
1398 
1399 	if (pipe_ctx->update_flags.bits.enable ||
1400 			pipe_ctx->update_flags.bits.plane_changed ||
1401 			plane_state->update_flags.bits.bpp_change ||
1402 			plane_state->update_flags.bits.input_csc_change ||
1403 			plane_state->update_flags.bits.color_space_change ||
1404 			plane_state->update_flags.bits.coeff_reduction_change) {
1405 		struct dc_bias_and_scale bns_params = {0};
1406 
1407 		// program the input csc
1408 		dpp->funcs->dpp_setup(dpp,
1409 				plane_state->format,
1410 				EXPANSION_MODE_ZERO,
1411 				plane_state->input_csc_color_matrix,
1412 				plane_state->color_space,
1413 				NULL);
1414 
1415 		if (dpp->funcs->dpp_program_bias_and_scale) {
1416 			//TODO :for CNVC set scale and bias registers if necessary
1417 			build_prescale_params(&bns_params, plane_state);
1418 			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
1419 		}
1420 	}
1421 
1422 	if (pipe_ctx->update_flags.bits.mpcc
1423 			|| pipe_ctx->update_flags.bits.plane_changed
1424 			|| plane_state->update_flags.bits.global_alpha_change
1425 			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
1426 		// MPCC inst is equal to pipe index in practice
1427 		int mpcc_inst = hubp->inst;
1428 		int opp_inst;
1429 		int opp_count = dc->res_pool->pipe_count;
1430 
1431 		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
1432 			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
1433 				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
1434 				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
1435 				break;
1436 			}
1437 		}
1438 		hws->funcs.update_mpcc(dc, pipe_ctx);
1439 	}
1440 
1441 	if (pipe_ctx->update_flags.bits.scaler ||
1442 			plane_state->update_flags.bits.scaling_change ||
1443 			plane_state->update_flags.bits.position_change ||
1444 			plane_state->update_flags.bits.per_pixel_alpha_change ||
1445 			pipe_ctx->stream->update_flags.bits.scaling) {
1446 		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
1447 		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
1448 		/* scaler configuration */
1449 		pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
1450 				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
1451 	}
1452 
1453 	if (pipe_ctx->update_flags.bits.viewport ||
1454 			(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
1455 			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
1456 			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
1457 
1458 		hubp->funcs->mem_program_viewport(
1459 			hubp,
1460 			&pipe_ctx->plane_res.scl_data.viewport,
1461 			&pipe_ctx->plane_res.scl_data.viewport_c);
1462 		viewport_changed = true;
1463 	}
1464 
1465 	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
1466 	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
1467 			pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
1468 			pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
1469 		dc->hwss.set_cursor_position(pipe_ctx);
1470 		dc->hwss.set_cursor_attribute(pipe_ctx);
1471 
1472 		if (dc->hwss.set_cursor_sdr_white_level)
1473 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
1474 	}
1475 
1476 	/* Any updates are handled in dc interface, just need
1477 	 * to apply existing for plane enable / opp change */
1478 	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
1479 			|| pipe_ctx->stream->update_flags.bits.gamut_remap
1480 			|| pipe_ctx->stream->update_flags.bits.out_csc) {
1481 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1482 		struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
1483 
1484 		if (mpc->funcs->set_gamut_remap) {
1485 			int i;
1486 			int mpcc_id = hubp->inst;
1487 			struct mpc_grph_gamut_adjustment adjust;
1488 			bool enable_remap_dpp = false;
1489 
1490 			memset(&adjust, 0, sizeof(adjust));
1491 			adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1492 
1493 			/* save the enablement of gamut remap for dpp */
1494 			enable_remap_dpp = pipe_ctx->stream->gamut_remap_matrix.enable_remap;
1495 
1496 			/* force bypass gamut remap for dpp/cm */
1497 			pipe_ctx->stream->gamut_remap_matrix.enable_remap = false;
1498 			dc->hwss.program_gamut_remap(pipe_ctx);
1499 
1500 			/* restore gamut remap flag and use this remap into mpc */
1501 			pipe_ctx->stream->gamut_remap_matrix.enable_remap = enable_remap_dpp;
1502 
1503 			/* build remap matrix for top plane if enabled */
1504 			if (enable_remap_dpp && pipe_ctx->top_pipe == NULL) {
1505 					adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1506 					for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1507 						adjust.temperature_matrix[i] =
1508 								pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1509 			}
1510 			mpc->funcs->set_gamut_remap(mpc, mpcc_id, &adjust);
1511 		} else
1512 #endif
1513 			/* dpp/cm gamut remap*/
1514 			dc->hwss.program_gamut_remap(pipe_ctx);
1515 
1516 		/*call the dcn2 method which uses mpc csc*/
1517 		dc->hwss.program_output_csc(dc,
1518 				pipe_ctx,
1519 				pipe_ctx->stream->output_color_space,
1520 				pipe_ctx->stream->csc_color_matrix.matrix,
1521 				hubp->opp_id);
1522 	}
1523 
1524 	if (pipe_ctx->update_flags.bits.enable ||
1525 			pipe_ctx->update_flags.bits.plane_changed ||
1526 			pipe_ctx->update_flags.bits.opp_changed ||
1527 			plane_state->update_flags.bits.pixel_format_change ||
1528 			plane_state->update_flags.bits.horizontal_mirror_change ||
1529 			plane_state->update_flags.bits.rotation_change ||
1530 			plane_state->update_flags.bits.swizzle_change ||
1531 			plane_state->update_flags.bits.dcc_change ||
1532 			plane_state->update_flags.bits.bpp_change ||
1533 			plane_state->update_flags.bits.scaling_change ||
1534 			plane_state->update_flags.bits.plane_size_change) {
1535 		struct plane_size size = plane_state->plane_size;
1536 
1537 		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
1538 		hubp->funcs->hubp_program_surface_config(
1539 			hubp,
1540 			plane_state->format,
1541 			&plane_state->tiling_info,
1542 			&size,
1543 			plane_state->rotation,
1544 			&plane_state->dcc,
1545 			plane_state->horizontal_mirror,
1546 			0);
1547 		hubp->power_gated = false;
1548 	}
1549 
1550 	if (pipe_ctx->update_flags.bits.enable ||
1551 		pipe_ctx->update_flags.bits.plane_changed ||
1552 		plane_state->update_flags.bits.addr_update)
1553 		hws->funcs.update_plane_addr(dc, pipe_ctx);
1554 
1555 
1556 
1557 	if (pipe_ctx->update_flags.bits.enable)
1558 		hubp->funcs->set_blank(hubp, false);
1559 }
1560 
1561 
1562 static void dcn20_program_pipe(
1563 		struct dc *dc,
1564 		struct pipe_ctx *pipe_ctx,
1565 		struct dc_state *context)
1566 {
1567 	struct dce_hwseq *hws = dc->hwseq;
1568 	/* Only need to unblank on top pipe */
1569 	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
1570 			&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
1571 		hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
1572 
1573 	if (pipe_ctx->update_flags.bits.global_sync) {
1574 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
1575 				pipe_ctx->stream_res.tg,
1576 				pipe_ctx->pipe_dlg_param.vready_offset,
1577 				pipe_ctx->pipe_dlg_param.vstartup_start,
1578 				pipe_ctx->pipe_dlg_param.vupdate_offset,
1579 				pipe_ctx->pipe_dlg_param.vupdate_width);
1580 
1581 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
1582 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
1583 
1584 		if (hws->funcs.setup_vupdate_interrupt)
1585 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
1586 	}
1587 
1588 	if (pipe_ctx->update_flags.bits.odm)
1589 		hws->funcs.update_odm(dc, context, pipe_ctx);
1590 
1591 	if (pipe_ctx->update_flags.bits.enable) {
1592 		dcn20_enable_plane(dc, pipe_ctx, context);
1593 		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
1594 			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
1595 	}
1596 
1597 	if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
1598 		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
1599 
1600 	if (pipe_ctx->update_flags.bits.enable
1601 			|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
1602 		hws->funcs.set_hdr_multiplier(pipe_ctx);
1603 
1604 	if (pipe_ctx->update_flags.bits.enable ||
1605 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
1606 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
1607 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
1608 
1609 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
1610 	 * only do gamma programming for powering on, internal memcmp to avoid
1611 	 * updating on slave planes
1612 	 */
1613 	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
1614 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
1615 
1616 	/* If the pipe has been enabled or has a different opp, we
1617 	 * should reprogram the fmt. This deals with cases where
1618 	 * interation between mpc and odm combine on different streams
1619 	 * causes a different pipe to be chosen to odm combine with.
1620 	 */
1621 	if (pipe_ctx->update_flags.bits.enable
1622 	    || pipe_ctx->update_flags.bits.opp_changed) {
1623 
1624 		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
1625 			pipe_ctx->stream_res.opp,
1626 			COLOR_SPACE_YCBCR601,
1627 			pipe_ctx->stream->timing.display_color_depth,
1628 			pipe_ctx->stream->signal);
1629 
1630 		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
1631 			pipe_ctx->stream_res.opp,
1632 			&pipe_ctx->stream->bit_depth_params,
1633 			&pipe_ctx->stream->clamping);
1634 	}
1635 }
1636 
1637 void dcn20_program_front_end_for_ctx(
1638 		struct dc *dc,
1639 		struct dc_state *context)
1640 {
1641 	int i;
1642 	struct dce_hwseq *hws = dc->hwseq;
1643 	DC_LOGGER_INIT(dc->ctx->logger);
1644 
1645 	/* Carry over GSL groups in case the context is changing. */
1646        for (i = 0; i < dc->res_pool->pipe_count; i++) {
1647                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1648                struct pipe_ctx *old_pipe_ctx =
1649                        &dc->current_state->res_ctx.pipe_ctx[i];
1650 
1651                if (pipe_ctx->stream == old_pipe_ctx->stream)
1652                        pipe_ctx->stream_res.gsl_group =
1653                                old_pipe_ctx->stream_res.gsl_group;
1654        }
1655 
1656 	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
1657 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1658 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1659 
1660 			if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
1661 				ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
1662 				/*turn off triple buffer for full update*/
1663 				dc->hwss.program_triplebuffer(
1664 						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
1665 			}
1666 		}
1667 	}
1668 
1669 	/* Set pipe update flags and lock pipes */
1670 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1671 		dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
1672 				&context->res_ctx.pipe_ctx[i]);
1673 
1674 	/* OTG blank before disabling all front ends */
1675 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1676 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
1677 				&& !context->res_ctx.pipe_ctx[i].top_pipe
1678 				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
1679 				&& context->res_ctx.pipe_ctx[i].stream)
1680 			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
1681 
1682 	/* Disconnect mpcc */
1683 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1684 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
1685 				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
1686 			hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
1687 			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
1688 		}
1689 
1690 	/*
1691 	 * Program all updated pipes, order matters for mpcc setup. Start with
1692 	 * top pipe and program all pipes that follow in order
1693 	 */
1694 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1695 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1696 
1697 		if (pipe->plane_state && !pipe->top_pipe) {
1698 			while (pipe) {
1699 				dcn20_program_pipe(dc, pipe, context);
1700 				pipe = pipe->bottom_pipe;
1701 			}
1702 			/* Program secondary blending tree and writeback pipes */
1703 			pipe = &context->res_ctx.pipe_ctx[i];
1704 			if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
1705 					&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
1706 					&& hws->funcs.program_all_writeback_pipes_in_tree)
1707 				hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
1708 		}
1709 	}
1710 }
1711 
1712 void dcn20_post_unlock_program_front_end(
1713 		struct dc *dc,
1714 		struct dc_state *context)
1715 {
1716 	int i;
1717 	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
1718 	struct dce_hwseq *hwseq = dc->hwseq;
1719 
1720 	DC_LOGGER_INIT(dc->ctx->logger);
1721 
1722 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1723 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
1724 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
1725 
1726 	/*
1727 	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
1728 	 * part of the enable operation otherwise, DM may request an immediate flip which
1729 	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
1730 	 * is unsupported on DCN.
1731 	 */
1732 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1733 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1734 
1735 		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
1736 			struct hubp *hubp = pipe->plane_res.hubp;
1737 			int j = 0;
1738 
1739 			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
1740 					&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
1741 				mdelay(1);
1742 		}
1743 	}
1744 
1745 	/* WA to apply WM setting*/
1746 	if (hwseq->wa.DEGVIDCN21)
1747 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
1748 
1749 
1750 	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
1751 	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
1752 
1753 		if (dc->current_state->stream_status[0].plane_count == 1 &&
1754 				context->stream_status[0].plane_count > 1) {
1755 
1756 			struct timing_generator *tg = dc->res_pool->timing_generators[0];
1757 
1758 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
1759 
1760 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
1761 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
1762 		}
1763 	}
1764 }
1765 
1766 void dcn20_prepare_bandwidth(
1767 		struct dc *dc,
1768 		struct dc_state *context)
1769 {
1770 	struct hubbub *hubbub = dc->res_pool->hubbub;
1771 
1772 	dc->clk_mgr->funcs->update_clocks(
1773 			dc->clk_mgr,
1774 			context,
1775 			false);
1776 
1777 	/* program dchubbub watermarks */
1778 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
1779 					&context->bw_ctx.bw.dcn.watermarks,
1780 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1781 					false);
1782 }
1783 
1784 void dcn20_optimize_bandwidth(
1785 		struct dc *dc,
1786 		struct dc_state *context)
1787 {
1788 	struct hubbub *hubbub = dc->res_pool->hubbub;
1789 
1790 	/* program dchubbub watermarks */
1791 	hubbub->funcs->program_watermarks(hubbub,
1792 					&context->bw_ctx.bw.dcn.watermarks,
1793 					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
1794 					true);
1795 
1796 	dc->clk_mgr->funcs->update_clocks(
1797 			dc->clk_mgr,
1798 			context,
1799 			true);
1800 }
1801 
1802 bool dcn20_update_bandwidth(
1803 		struct dc *dc,
1804 		struct dc_state *context)
1805 {
1806 	int i;
1807 	struct dce_hwseq *hws = dc->hwseq;
1808 
1809 	/* recalculate DML parameters */
1810 	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
1811 		return false;
1812 
1813 	/* apply updated bandwidth parameters */
1814 	dc->hwss.prepare_bandwidth(dc, context);
1815 
1816 	/* update hubp configs for all pipes */
1817 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1818 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1819 
1820 		if (pipe_ctx->plane_state == NULL)
1821 			continue;
1822 
1823 		if (pipe_ctx->top_pipe == NULL) {
1824 			bool blank = !is_pipe_tree_visible(pipe_ctx);
1825 
1826 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
1827 					pipe_ctx->stream_res.tg,
1828 					pipe_ctx->pipe_dlg_param.vready_offset,
1829 					pipe_ctx->pipe_dlg_param.vstartup_start,
1830 					pipe_ctx->pipe_dlg_param.vupdate_offset,
1831 					pipe_ctx->pipe_dlg_param.vupdate_width);
1832 
1833 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
1834 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
1835 
1836 			if (pipe_ctx->prev_odm_pipe == NULL)
1837 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
1838 
1839 			if (hws->funcs.setup_vupdate_interrupt)
1840 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
1841 		}
1842 
1843 		pipe_ctx->plane_res.hubp->funcs->hubp_setup(
1844 				pipe_ctx->plane_res.hubp,
1845 					&pipe_ctx->dlg_regs,
1846 					&pipe_ctx->ttu_regs,
1847 					&pipe_ctx->rq_regs,
1848 					&pipe_ctx->pipe_dlg_param);
1849 	}
1850 
1851 	return true;
1852 }
1853 
1854 void dcn20_enable_writeback(
1855 		struct dc *dc,
1856 		struct dc_writeback_info *wb_info,
1857 		struct dc_state *context)
1858 {
1859 	struct dwbc *dwb;
1860 	struct mcif_wb *mcif_wb;
1861 	struct timing_generator *optc;
1862 
1863 	ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
1864 	ASSERT(wb_info->wb_enabled);
1865 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
1866 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
1867 
1868 	/* set the OPTC source mux */
1869 	optc = dc->res_pool->timing_generators[dwb->otg_inst];
1870 	optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
1871 	/* set MCIF_WB buffer and arbitration configuration */
1872 	mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
1873 	mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
1874 	/* Enable MCIF_WB */
1875 	mcif_wb->funcs->enable_mcif(mcif_wb);
1876 	/* Enable DWB */
1877 	dwb->funcs->enable(dwb, &wb_info->dwb_params);
1878 	/* TODO: add sequence to enable/disable warmup */
1879 }
1880 
1881 void dcn20_disable_writeback(
1882 		struct dc *dc,
1883 		unsigned int dwb_pipe_inst)
1884 {
1885 	struct dwbc *dwb;
1886 	struct mcif_wb *mcif_wb;
1887 
1888 	ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
1889 	dwb = dc->res_pool->dwbc[dwb_pipe_inst];
1890 	mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
1891 
1892 	dwb->funcs->disable(dwb);
1893 	mcif_wb->funcs->disable_mcif(mcif_wb);
1894 }
1895 
1896 bool dcn20_wait_for_blank_complete(
1897 		struct output_pixel_processor *opp)
1898 {
1899 	int counter;
1900 
1901 	for (counter = 0; counter < 1000; counter++) {
1902 		if (opp->funcs->dpg_is_blanked(opp))
1903 			break;
1904 
1905 		udelay(100);
1906 	}
1907 
1908 	if (counter == 1000) {
1909 		dm_error("DC: failed to blank crtc!\n");
1910 		return false;
1911 	}
1912 
1913 	return true;
1914 }
1915 
1916 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
1917 {
1918 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1919 
1920 	if (!hubp)
1921 		return false;
1922 	return hubp->funcs->dmdata_status_done(hubp);
1923 }
1924 
1925 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
1926 {
1927 	struct dce_hwseq *hws = dc->hwseq;
1928 
1929 	if (pipe_ctx->stream_res.dsc) {
1930 		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
1931 
1932 		hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
1933 		while (odm_pipe) {
1934 			hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
1935 			odm_pipe = odm_pipe->next_odm_pipe;
1936 		}
1937 	}
1938 }
1939 
1940 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
1941 {
1942 	struct dce_hwseq *hws = dc->hwseq;
1943 
1944 	if (pipe_ctx->stream_res.dsc) {
1945 		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
1946 
1947 		hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
1948 		while (odm_pipe) {
1949 			hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
1950 			odm_pipe = odm_pipe->next_odm_pipe;
1951 		}
1952 	}
1953 }
1954 
1955 void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
1956 {
1957 	struct dc_dmdata_attributes attr = { 0 };
1958 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1959 
1960 	attr.dmdata_mode = DMDATA_HW_MODE;
1961 	attr.dmdata_size =
1962 		dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
1963 	attr.address.quad_part =
1964 			pipe_ctx->stream->dmdata_address.quad_part;
1965 	attr.dmdata_dl_delta = 0;
1966 	attr.dmdata_qos_mode = 0;
1967 	attr.dmdata_qos_level = 0;
1968 	attr.dmdata_repeat = 1; /* always repeat */
1969 	attr.dmdata_updated = 1;
1970 	attr.dmdata_sw_data = NULL;
1971 
1972 	hubp->funcs->dmdata_set_attributes(hubp, &attr);
1973 }
1974 
1975 void dcn20_init_vm_ctx(
1976 		struct dce_hwseq *hws,
1977 		struct dc *dc,
1978 		struct dc_virtual_addr_space_config *va_config,
1979 		int vmid)
1980 {
1981 	struct dcn_hubbub_virt_addr_config config;
1982 
1983 	if (vmid == 0) {
1984 		ASSERT(0); /* VMID cannot be 0 for vm context */
1985 		return;
1986 	}
1987 
1988 	config.page_table_start_addr = va_config->page_table_start_addr;
1989 	config.page_table_end_addr = va_config->page_table_end_addr;
1990 	config.page_table_block_size = va_config->page_table_block_size_in_bytes;
1991 	config.page_table_depth = va_config->page_table_depth;
1992 	config.page_table_base_addr = va_config->page_table_base_addr;
1993 
1994 	dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
1995 }
1996 
1997 int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
1998 {
1999 	struct dcn_hubbub_phys_addr_config config;
2000 
2001 	config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
2002 	config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
2003 	config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
2004 	config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
2005 	config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
2006 	config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
2007 	config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
2008 	config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
2009 	config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
2010 	config.page_table_default_page_addr = pa_config->page_table_default_page_addr;
2011 
2012 	return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
2013 }
2014 
2015 static bool patch_address_for_sbs_tb_stereo(
2016 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
2017 {
2018 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2019 	bool sec_split = pipe_ctx->top_pipe &&
2020 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
2021 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2022 			(pipe_ctx->stream->timing.timing_3d_format ==
2023 			TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2024 			pipe_ctx->stream->timing.timing_3d_format ==
2025 			TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
2026 		*addr = plane_state->address.grph_stereo.left_addr;
2027 		plane_state->address.grph_stereo.left_addr =
2028 				plane_state->address.grph_stereo.right_addr;
2029 		return true;
2030 	}
2031 
2032 	if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
2033 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
2034 		plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
2035 		plane_state->address.grph_stereo.right_addr =
2036 				plane_state->address.grph_stereo.left_addr;
2037 	}
2038 	return false;
2039 }
2040 
2041 void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
2042 {
2043 	bool addr_patched = false;
2044 	PHYSICAL_ADDRESS_LOC addr;
2045 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2046 
2047 	if (plane_state == NULL)
2048 		return;
2049 
2050 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
2051 
2052 	// Call Helper to track VMID use
2053 	vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst);
2054 
2055 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
2056 			pipe_ctx->plane_res.hubp,
2057 			&plane_state->address,
2058 			plane_state->flip_immediate);
2059 
2060 	plane_state->status.requested_address = plane_state->address;
2061 
2062 	if (plane_state->flip_immediate)
2063 		plane_state->status.current_address = plane_state->address;
2064 
2065 	if (addr_patched)
2066 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
2067 }
2068 
2069 void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
2070 		struct dc_link_settings *link_settings)
2071 {
2072 	struct encoder_unblank_param params = { { 0 } };
2073 	struct dc_stream_state *stream = pipe_ctx->stream;
2074 	struct dc_link *link = stream->link;
2075 	struct dce_hwseq *hws = link->dc->hwseq;
2076 	struct pipe_ctx *odm_pipe;
2077 
2078 	params.opp_cnt = 1;
2079 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
2080 		params.opp_cnt++;
2081 	}
2082 	/* only 3 items below are used by unblank */
2083 	params.timing = pipe_ctx->stream->timing;
2084 
2085 	params.link_settings.link_rate = link_settings->link_rate;
2086 
2087 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
2088 		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
2089 			params.timing.pix_clk_100hz /= 2;
2090 		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
2091 				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
2092 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
2093 	}
2094 
2095 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
2096 		hws->funcs.edp_backlight_control(link, true);
2097 	}
2098 }
2099 
2100 void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
2101 {
2102 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
2103 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
2104 
2105 	if (start_line < 0)
2106 		start_line = 0;
2107 
2108 	if (tg->funcs->setup_vertical_interrupt2)
2109 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
2110 }
2111 
2112 static void dcn20_reset_back_end_for_pipe(
2113 		struct dc *dc,
2114 		struct pipe_ctx *pipe_ctx,
2115 		struct dc_state *context)
2116 {
2117 	int i;
2118 	struct dc_link *link;
2119 	DC_LOGGER_INIT(dc->ctx->logger);
2120 	if (pipe_ctx->stream_res.stream_enc == NULL) {
2121 		pipe_ctx->stream = NULL;
2122 		return;
2123 	}
2124 
2125 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2126 		link = pipe_ctx->stream->link;
2127 		/* DPMS may already disable or */
2128 		/* dpms_off status is incorrect due to fastboot
2129 		 * feature. When system resume from S4 with second
2130 		 * screen only, the dpms_off would be true but
2131 		 * VBIOS lit up eDP, so check link status too.
2132 		 */
2133 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
2134 			core_link_disable_stream(pipe_ctx);
2135 		else if (pipe_ctx->stream_res.audio)
2136 			dc->hwss.disable_audio_stream(pipe_ctx);
2137 
2138 		/* free acquired resources */
2139 		if (pipe_ctx->stream_res.audio) {
2140 			/*disable az_endpoint*/
2141 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2142 
2143 			/*free audio*/
2144 			if (dc->caps.dynamic_audio == true) {
2145 				/*we have to dynamic arbitrate the audio endpoints*/
2146 				/*we free the resource, need reset is_audio_acquired*/
2147 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
2148 						pipe_ctx->stream_res.audio, false);
2149 				pipe_ctx->stream_res.audio = NULL;
2150 			}
2151 		}
2152 	}
2153 	else if (pipe_ctx->stream_res.dsc) {
2154 		dp_set_dsc_enable(pipe_ctx, false);
2155 	}
2156 
2157 	/* by upper caller loop, parent pipe: pipe0, will be reset last.
2158 	 * back end share by all pipes and will be disable only when disable
2159 	 * parent pipe.
2160 	 */
2161 	if (pipe_ctx->top_pipe == NULL) {
2162 
2163 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2164 
2165 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
2166 
2167 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
2168 		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
2169 			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
2170 					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2171 
2172 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
2173 			pipe_ctx->stream_res.tg->funcs->set_drr(
2174 					pipe_ctx->stream_res.tg, NULL);
2175 	}
2176 
2177 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2178 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
2179 			break;
2180 
2181 	if (i == dc->res_pool->pipe_count)
2182 		return;
2183 
2184 	pipe_ctx->stream = NULL;
2185 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
2186 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
2187 }
2188 
2189 void dcn20_reset_hw_ctx_wrap(
2190 		struct dc *dc,
2191 		struct dc_state *context)
2192 {
2193 	int i;
2194 	struct dce_hwseq *hws = dc->hwseq;
2195 
2196 	/* Reset Back End*/
2197 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
2198 		struct pipe_ctx *pipe_ctx_old =
2199 			&dc->current_state->res_ctx.pipe_ctx[i];
2200 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2201 
2202 		if (!pipe_ctx_old->stream)
2203 			continue;
2204 
2205 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
2206 			continue;
2207 
2208 		if (!pipe_ctx->stream ||
2209 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
2210 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
2211 
2212 			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
2213 			if (hws->funcs.enable_stream_gating)
2214 				hws->funcs.enable_stream_gating(dc, pipe_ctx);
2215 			if (old_clk)
2216 				old_clk->funcs->cs_power_down(old_clk);
2217 		}
2218 	}
2219 }
2220 
2221 void dcn20_get_mpctree_visual_confirm_color(
2222 		struct pipe_ctx *pipe_ctx,
2223 		struct tg_color *color)
2224 {
2225 	const struct tg_color pipe_colors[6] = {
2226 			{MAX_TG_COLOR_VALUE, 0, 0}, // red
2227 			{MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
2228 			{0, MAX_TG_COLOR_VALUE, 0}, // blue
2229 			{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
2230 			{0, 0, MAX_TG_COLOR_VALUE}, // green
2231 			{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
2232 	};
2233 
2234 	struct pipe_ctx *top_pipe = pipe_ctx;
2235 
2236 	while (top_pipe->top_pipe) {
2237 		top_pipe = top_pipe->top_pipe;
2238 	}
2239 
2240 	*color = pipe_colors[top_pipe->pipe_idx];
2241 }
2242 
2243 void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2244 {
2245 	struct dce_hwseq *hws = dc->hwseq;
2246 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2247 	struct mpcc_blnd_cfg blnd_cfg = { {0} };
2248 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
2249 	int mpcc_id;
2250 	struct mpcc *new_mpcc;
2251 	struct mpc *mpc = dc->res_pool->mpc;
2252 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2253 
2254 	// input to MPCC is always RGB, by default leave black_color at 0
2255 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2256 		hws->funcs.get_hdr_visual_confirm_color(
2257 				pipe_ctx, &blnd_cfg.black_color);
2258 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2259 		hws->funcs.get_surface_visual_confirm_color(
2260 				pipe_ctx, &blnd_cfg.black_color);
2261 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
2262 		dcn20_get_mpctree_visual_confirm_color(
2263 				pipe_ctx, &blnd_cfg.black_color);
2264 	}
2265 
2266 	if (per_pixel_alpha)
2267 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2268 	else
2269 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2270 
2271 	blnd_cfg.overlap_only = false;
2272 	blnd_cfg.global_gain = 0xff;
2273 
2274 	if (pipe_ctx->plane_state->global_alpha)
2275 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2276 	else
2277 		blnd_cfg.global_alpha = 0xff;
2278 
2279 	blnd_cfg.background_color_bpc = 4;
2280 	blnd_cfg.bottom_gain_mode = 0;
2281 	blnd_cfg.top_gain = 0x1f000;
2282 	blnd_cfg.bottom_inside_gain = 0x1f000;
2283 	blnd_cfg.bottom_outside_gain = 0x1f000;
2284 	blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
2285 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
2286 	if (pipe_ctx->plane_state->format
2287 			== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
2288 		blnd_cfg.pre_multiplied_alpha = false;
2289 #endif
2290 
2291 	/*
2292 	 * TODO: remove hack
2293 	 * Note: currently there is a bug in init_hw such that
2294 	 * on resume from hibernate, BIOS sets up MPCC0, and
2295 	 * we do mpcc_remove but the mpcc cannot go to idle
2296 	 * after remove. This cause us to pick mpcc1 here,
2297 	 * which causes a pstate hang for yet unknown reason.
2298 	 */
2299 	mpcc_id = hubp->inst;
2300 
2301 	/* If there is no full update, don't need to touch MPC tree*/
2302 	if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
2303 		!pipe_ctx->update_flags.bits.mpcc) {
2304 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2305 		return;
2306 	}
2307 
2308 	/* check if this MPCC is already being used */
2309 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2310 	/* remove MPCC if being used */
2311 	if (new_mpcc != NULL)
2312 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2313 	else
2314 		if (dc->debug.sanity_checks)
2315 			mpc->funcs->assert_mpcc_idle_before_connect(
2316 					dc->res_pool->mpc, mpcc_id);
2317 
2318 	/* Call MPC to insert new plane */
2319 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2320 			mpc_tree_params,
2321 			&blnd_cfg,
2322 			NULL,
2323 			NULL,
2324 			hubp->inst,
2325 			mpcc_id);
2326 
2327 	ASSERT(new_mpcc != NULL);
2328 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2329 	hubp->mpcc_id = mpcc_id;
2330 }
2331 
2332 void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
2333 {
2334 	enum dc_lane_count lane_count =
2335 		pipe_ctx->stream->link->cur_link_settings.lane_count;
2336 
2337 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
2338 	struct dc_link *link = pipe_ctx->stream->link;
2339 
2340 	uint32_t active_total_with_borders;
2341 	uint32_t early_control = 0;
2342 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
2343 
2344 	/* For MST, there are multiply stream go to only one link.
2345 	 * connect DIG back_end to front_end while enable_stream and
2346 	 * disconnect them during disable_stream
2347 	 * BY this, it is logic clean to separate stream and link
2348 	 */
2349 	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
2350 						    pipe_ctx->stream_res.stream_enc->id, true);
2351 
2352 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
2353 		if (link->dc->hwss.program_dmdata_engine)
2354 			link->dc->hwss.program_dmdata_engine(pipe_ctx);
2355 	}
2356 
2357 	link->dc->hwss.update_info_frame(pipe_ctx);
2358 
2359 	/* enable early control to avoid corruption on DP monitor*/
2360 	active_total_with_borders =
2361 			timing->h_addressable
2362 				+ timing->h_border_left
2363 				+ timing->h_border_right;
2364 
2365 	if (lane_count != 0)
2366 		early_control = active_total_with_borders % lane_count;
2367 
2368 	if (early_control == 0)
2369 		early_control = lane_count;
2370 
2371 	tg->funcs->set_early_control(tg, early_control);
2372 
2373 	/* enable audio only within mode set */
2374 	if (pipe_ctx->stream_res.audio != NULL) {
2375 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
2376 			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
2377 	}
2378 }
2379 
2380 void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
2381 {
2382 	struct dc_stream_state    *stream     = pipe_ctx->stream;
2383 	struct hubp               *hubp       = pipe_ctx->plane_res.hubp;
2384 	bool                       enable     = false;
2385 	struct stream_encoder     *stream_enc = pipe_ctx->stream_res.stream_enc;
2386 	enum dynamic_metadata_mode mode       = dc_is_dp_signal(stream->signal)
2387 							? dmdata_dp
2388 							: dmdata_hdmi;
2389 
2390 	/* if using dynamic meta, don't set up generic infopackets */
2391 	if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
2392 		pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
2393 		enable = true;
2394 	}
2395 
2396 	if (!hubp)
2397 		return;
2398 
2399 	if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
2400 		return;
2401 
2402 	stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
2403 						hubp->inst, mode);
2404 }
2405 
2406 void dcn20_fpga_init_hw(struct dc *dc)
2407 {
2408 	int i, j;
2409 	struct dce_hwseq *hws = dc->hwseq;
2410 	struct resource_pool *res_pool = dc->res_pool;
2411 	struct dc_state  *context = dc->current_state;
2412 
2413 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
2414 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
2415 
2416 	// Initialize the dccg
2417 	if (res_pool->dccg->funcs->dccg_init)
2418 		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
2419 
2420 	//Enable ability to power gate / don't force power on permanently
2421 	hws->funcs.enable_power_gating_plane(hws, true);
2422 
2423 	// Specific to FPGA dccg and registers
2424 	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
2425 	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
2426 
2427 	hws->funcs.dccg_init(hws);
2428 
2429 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
2430 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
2431 	if (REG(REFCLK_CNTL))
2432 		REG_WRITE(REFCLK_CNTL, 0);
2433 	//
2434 
2435 
2436 	/* Blank pixel data with OPP DPG */
2437 	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
2438 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
2439 
2440 		if (tg->funcs->is_tg_enabled(tg))
2441 			dcn20_init_blank(dc, tg);
2442 	}
2443 
2444 	for (i = 0; i < res_pool->timing_generator_count; i++) {
2445 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
2446 
2447 		if (tg->funcs->is_tg_enabled(tg))
2448 			tg->funcs->lock(tg);
2449 	}
2450 
2451 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2452 		struct dpp *dpp = res_pool->dpps[i];
2453 
2454 		dpp->funcs->dpp_reset(dpp);
2455 	}
2456 
2457 	/* Reset all MPCC muxes */
2458 	res_pool->mpc->funcs->mpc_init(res_pool->mpc);
2459 
2460 	/* initialize OPP mpc_tree parameter */
2461 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
2462 		res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
2463 		res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
2464 		for (j = 0; j < MAX_PIPES; j++)
2465 			res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
2466 	}
2467 
2468 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2469 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
2470 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2471 		struct hubp *hubp = dc->res_pool->hubps[i];
2472 		struct dpp *dpp = dc->res_pool->dpps[i];
2473 
2474 		pipe_ctx->stream_res.tg = tg;
2475 		pipe_ctx->pipe_idx = i;
2476 
2477 		pipe_ctx->plane_res.hubp = hubp;
2478 		pipe_ctx->plane_res.dpp = dpp;
2479 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
2480 		hubp->mpcc_id = dpp->inst;
2481 		hubp->opp_id = OPP_ID_INVALID;
2482 		hubp->power_gated = false;
2483 		pipe_ctx->stream_res.opp = NULL;
2484 
2485 		hubp->funcs->hubp_init(hubp);
2486 
2487 		//dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
2488 		//dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
2489 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
2490 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
2491 		/*to do*/
2492 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
2493 	}
2494 
2495 	/* initialize DWB pointer to MCIF_WB */
2496 	for (i = 0; i < res_pool->res_cap->num_dwb; i++)
2497 		res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
2498 
2499 	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
2500 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
2501 
2502 		if (tg->funcs->is_tg_enabled(tg))
2503 			tg->funcs->unlock(tg);
2504 	}
2505 
2506 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2507 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2508 
2509 		dc->hwss.disable_plane(dc, pipe_ctx);
2510 
2511 		pipe_ctx->stream_res.tg = NULL;
2512 		pipe_ctx->plane_res.hubp = NULL;
2513 	}
2514 
2515 	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
2516 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
2517 
2518 		tg->funcs->tg_init(tg);
2519 	}
2520 }
2521 #ifndef TRIM_FSFT
2522 bool dcn20_optimize_timing_for_fsft(struct dc *dc,
2523 		struct dc_crtc_timing *timing,
2524 		unsigned int max_input_rate_in_khz)
2525 {
2526 	unsigned int old_v_front_porch;
2527 	unsigned int old_v_total;
2528 	unsigned int max_input_rate_in_100hz;
2529 	unsigned long long new_v_total;
2530 
2531 	max_input_rate_in_100hz = max_input_rate_in_khz * 10;
2532 	if (max_input_rate_in_100hz < timing->pix_clk_100hz)
2533 		return false;
2534 
2535 	old_v_total = timing->v_total;
2536 	old_v_front_porch = timing->v_front_porch;
2537 
2538 	timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz;
2539 	timing->pix_clk_100hz = max_input_rate_in_100hz;
2540 
2541 	new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz);
2542 
2543 	timing->v_total = new_v_total;
2544 	timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total);
2545 	return true;
2546 }
2547 #endif
2548