xref: /openbmc/linux/drivers/gpu/drm/amd/display/dc/core/dc.c (revision 64288aa9)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24 
25 #include <linux/slab.h>
26 #include <linux/mm.h>
27 
28 #include "dm_services.h"
29 
30 #include "dc.h"
31 
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
36 
37 #include "resource.h"
38 
39 #include "clk_mgr.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
42 
43 #include "bios_parser_interface.h"
44 #include "bios/bios_parser_helper.h"
45 #include "include/irq_service_interface.h"
46 #include "transform.h"
47 #include "dmcu.h"
48 #include "dpp.h"
49 #include "timing_generator.h"
50 #include "abm.h"
51 #include "virtual/virtual_link_encoder.h"
52 #include "hubp.h"
53 
54 #include "link_hwss.h"
55 #include "link_encoder.h"
56 #include "link_enc_cfg.h"
57 
58 #include "dc_link.h"
59 #include "dc_link_ddc.h"
60 #include "dm_helpers.h"
61 #include "mem_input.h"
62 
63 #include "dc_link_dp.h"
64 #include "dc_dmub_srv.h"
65 
66 #include "dsc.h"
67 
68 #include "vm_helper.h"
69 
70 #include "dce/dce_i2c.h"
71 
72 #include "dmub/dmub_srv.h"
73 
74 #include "i2caux_interface.h"
75 #include "dce/dmub_hw_lock_mgr.h"
76 
77 #include "dc_trace.h"
78 
79 #define CTX \
80 	dc->ctx
81 
82 #define DC_LOGGER \
83 	dc->ctx->logger
84 
85 static const char DC_BUILD_ID[] = "production-build";
86 
87 /**
88  * DOC: Overview
89  *
90  * DC is the OS-agnostic component of the amdgpu DC driver.
91  *
92  * DC maintains and validates a set of structs representing the state of the
93  * driver and writes that state to AMD hardware.
94  *
95  * Main DC HW structs:
96  *
97  * struct dc - The central struct.  One per driver.  Created on driver load,
98  * destroyed on driver unload.
99  *
100  * struct dc_context - One per driver.
101  * Used as a backpointer by most other structs in dc.
102  *
103  * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
104  * plugpoints).  Created on driver load, destroyed on driver unload.
105  *
106  * struct dc_sink - One per display.  Created on boot or hotplug.
107  * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
108  * (the display directly attached).  It may also have one or more remote
109  * sinks (in the Multi-Stream Transport case).
110  *
111  * struct resource_pool - One per driver.  Represents the hw blocks not in the
112  * main pipeline.  Not directly accessible by dm.
113  *
114  * Main dc state structs:
115  *
116  * These structs can be created and destroyed as needed.  There is a full set of
117  * these structs in dc->current_state representing the currently programmed state.
118  *
119  * struct dc_state - The global DC state that tracks global state information,
120  * such as bandwidth values.
121  *
122  * struct dc_stream_state - Represents the hw configuration for the pipeline from
123  * a framebuffer to a display.  Maps one-to-one with dc_sink.
124  *
125  * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
126  * and may have more in the Multi-Plane Overlay case.
127  *
128  * struct resource_context - Represents the programmable state of everything in
129  * the resource_pool.  Not directly accessible by dm.
130  *
131  * struct pipe_ctx - A member of struct resource_context.  Represents the
132  * internal hardware pipeline components.  Each dc_plane_state has either
133  * one or two (in the pipe-split case).
134  */
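
/*
 * Example: a rough, illustrative sketch of how a display manager (dm)
 * typically drives these structs through DC's public entry points. The
 * exact sequence and error handling belong to the OS-specific dm layer
 * (e.g. amdgpu_dm); "init_data" below stands in for a dm-populated
 * struct dc_init_data, and this is not a literal call trace.
 *
 *	struct dc *dc = dc_create(&init_data);	// one per driver
 *
 *	dc_hardware_init(dc);			// bring HW to a known state
 *
 *	struct dc_state *state = dc_create_state(dc);
 *	// ...add streams and planes, then validate the state...
 *	dc_commit_state(dc, state);		// becomes dc->current_state
 *	dc_release_state(state);
 *
 *	dc_destroy(&dc);			// on driver unload
 */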
135 
136 /*******************************************************************************
137  * Private functions
138  ******************************************************************************/
139 
140 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
141 {
142 	if (new > *original)
143 		*original = new;
144 }
145 
146 static void destroy_links(struct dc *dc)
147 {
148 	uint32_t i;
149 
150 	for (i = 0; i < dc->link_count; i++) {
151 		if (NULL != dc->links[i])
152 			link_destroy(&dc->links[i]);
153 	}
154 }
155 
156 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
157 {
158 	int i;
159 	uint32_t count = 0;
160 
161 	for (i = 0; i < num_links; i++) {
162 		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
163 				links[i]->is_internal_display)
164 			count++;
165 	}
166 
167 	return count;
168 }
169 
170 static int get_seamless_boot_stream_count(struct dc_state *ctx)
171 {
172 	uint8_t i;
173 	uint8_t seamless_boot_stream_count = 0;
174 
175 	for (i = 0; i < ctx->stream_count; i++)
176 		if (ctx->streams[i]->apply_seamless_boot_optimization)
177 			seamless_boot_stream_count++;
178 
179 	return seamless_boot_stream_count;
180 }
181 
182 static bool create_links(
183 		struct dc *dc,
184 		uint32_t num_virtual_links)
185 {
186 	int i;
187 	int connectors_num;
188 	struct dc_bios *bios = dc->ctx->dc_bios;
189 
190 	dc->link_count = 0;
191 
192 	connectors_num = bios->funcs->get_connectors_number(bios);
193 
194 	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
195 
196 	if (connectors_num > ENUM_ID_COUNT) {
197 		dm_error(
198 			"DC: Number of connectors %d exceeds maximum of %d!\n",
199 			connectors_num,
200 			ENUM_ID_COUNT);
201 		return false;
202 	}
203 
204 	dm_output_to_console(
205 		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
206 		__func__,
207 		connectors_num,
208 		num_virtual_links);
209 
210 	for (i = 0; i < connectors_num; i++) {
211 		struct link_init_data link_init_params = {0};
212 		struct dc_link *link;
213 
214 		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
215 
216 		link_init_params.ctx = dc->ctx;
217 		/* next BIOS object table connector */
218 		link_init_params.connector_index = i;
219 		link_init_params.link_index = dc->link_count;
220 		link_init_params.dc = dc;
221 		link = link_create(&link_init_params);
222 
223 		if (link) {
224 			dc->links[dc->link_count] = link;
225 			link->dc = dc;
226 			++dc->link_count;
227 		}
228 	}
229 
230 	DC_LOG_DC("BIOS object table - end");
231 
232 	/* Create a link for each usb4 dpia port */
233 	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
234 		struct link_init_data link_init_params = {0};
235 		struct dc_link *link;
236 
237 		link_init_params.ctx = dc->ctx;
238 		link_init_params.connector_index = i;
239 		link_init_params.link_index = dc->link_count;
240 		link_init_params.dc = dc;
241 		link_init_params.is_dpia_link = true;
242 
243 		link = link_create(&link_init_params);
244 		if (link) {
245 			dc->links[dc->link_count] = link;
246 			link->dc = dc;
247 			++dc->link_count;
248 		}
249 	}
250 
251 	for (i = 0; i < num_virtual_links; i++) {
252 		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
253 		struct encoder_init_data enc_init = {0};
254 
255 		if (link == NULL) {
256 			BREAK_TO_DEBUGGER();
257 			goto failed_alloc;
258 		}
259 
260 		link->link_index = dc->link_count;
261 		dc->links[dc->link_count] = link;
262 		dc->link_count++;
263 
264 		link->ctx = dc->ctx;
265 		link->dc = dc;
266 		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
267 		link->link_id.type = OBJECT_TYPE_CONNECTOR;
268 		link->link_id.id = CONNECTOR_ID_VIRTUAL;
269 		link->link_id.enum_id = ENUM_ID_1;
270 		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
271 
272 		if (!link->link_enc) {
273 			BREAK_TO_DEBUGGER();
274 			goto failed_alloc;
275 		}
276 
277 		link->link_status.dpcd_caps = &link->dpcd_caps;
278 
279 		enc_init.ctx = dc->ctx;
280 		enc_init.channel = CHANNEL_ID_UNKNOWN;
281 		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
282 		enc_init.transmitter = TRANSMITTER_UNKNOWN;
283 		enc_init.connector = link->link_id;
284 		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
285 		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
286 		enc_init.encoder.enum_id = ENUM_ID_1;
287 		virtual_link_encoder_construct(link->link_enc, &enc_init);
288 	}
289 
290 	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
291 
292 	return true;
293 
294 failed_alloc:
295 	return false;
296 }
297 
298 /* Create additional DIG link encoder objects if fewer than the platform
299  * supports were created during link construction. This can happen if the
300  * number of physical connectors is less than the number of DIGs.
301  */
302 static bool create_link_encoders(struct dc *dc)
303 {
304 	bool res = true;
305 	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
306 	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
307 	int i;
308 
309 	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
310 	 * link encoders and physical display endpoints and does not require
311 	 * additional link encoder objects.
312 	 */
313 	if (num_usb4_dpia == 0)
314 		return res;
315 
316 	/* Create as many link encoder objects as the platform supports. DPIA
317 	 * endpoints can be programmably mapped to any DIG.
318 	 */
319 	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
320 		for (i = 0; i < num_dig_link_enc; i++) {
321 			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
322 
323 			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
324 				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
325 						(enum engine_id)(ENGINE_ID_DIGA + i));
326 				if (link_enc) {
327 					dc->res_pool->link_encoders[i] = link_enc;
328 					dc->res_pool->dig_link_enc_count++;
329 				} else {
330 					res = false;
331 				}
332 			}
333 		}
334 	}
335 
336 	return res;
337 }
338 
339 /* Destroy any additional DIG link encoder objects created by
340  * create_link_encoders().
341  * NB: Must only be called after destroy_links().
342  */
343 static void destroy_link_encoders(struct dc *dc)
344 {
345 	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
346 	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
347 	int i;
348 
349 	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
350 	 * link encoders and physical display endpoints and does not require
351 	 * additional link encoder objects.
352 	 */
353 	if (num_usb4_dpia == 0)
354 		return;
355 
356 	for (i = 0; i < num_dig_link_enc; i++) {
357 		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
358 
359 		if (link_enc) {
360 			link_enc->funcs->destroy(&link_enc);
361 			dc->res_pool->link_encoders[i] = NULL;
362 			dc->res_pool->dig_link_enc_count--;
363 		}
364 	}
365 }
366 
367 static struct dc_perf_trace *dc_perf_trace_create(void)
368 {
369 	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
370 }
371 
372 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
373 {
374 	kfree(*perf_trace);
375 	*perf_trace = NULL;
376 }
377 
378 /**
379  * dc_stream_adjust_vmin_vmax() - Adjust DRR vertical total limits for a stream
380  *
381  * Looks up the pipe context of dc_stream_state and updates the
382  * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
383  * Rate), a power-saving feature that reduces the panel refresh rate
384  * while the screen content is static.
385  *
386  * @dc:     dc reference
387  * @stream: Initial dc stream state
388  * @adjust: Updated parameters for vertical_total_min and vertical_total_max
389  */
390 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
391 		struct dc_stream_state *stream,
392 		struct dc_crtc_timing_adjust *adjust)
393 {
394 	int i;
395 	bool ret = false;
396 
397 	stream->adjust.v_total_max = adjust->v_total_max;
398 	stream->adjust.v_total_mid = adjust->v_total_mid;
399 	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
400 	stream->adjust.v_total_min = adjust->v_total_min;
401 
402 	for (i = 0; i < MAX_PIPES; i++) {
403 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
404 
405 		if (pipe->stream == stream && pipe->stream_res.tg) {
406 			dc->hwss.set_drr(&pipe,
407 					1,
408 					*adjust);
409 
410 			ret = true;
411 		}
412 	}
413 	return ret;
414 }
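
/*
 * Example: a minimal, hypothetical DRR adjustment. The values below are
 * placeholders; real callers (the dm's freesync/VRR code) derive the
 * v_total range from the panel's supported refresh rates.
 *
 *	struct dc_crtc_timing_adjust adjust = {
 *		.v_total_min = stream->timing.v_total,		// fastest refresh
 *		.v_total_max = 2 * stream->timing.v_total,	// slowest refresh
 *	};
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		; // no enabled pipe was found for this stream
 */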
415 
416 /**
417  * dc_stream_get_last_used_drr_vtotal() - Get the last VTOTAL used by DRR
418  *
419  * Looks up the pipe context of dc_stream_state and gets the last VTOTAL
420  * used by DRR (Dynamic Refresh Rate). Only works if the timing generator
421  * for the DC version in question implements get_last_used_drr_vtotal.
422  *
423  * @dc:           dc reference
424  * @stream:       Initial dc stream state
425  * @refresh_rate: Receives the last VTOTAL used by DRR for this stream
426  *
427  * Return: true if a timing generator supporting the query was found for
428  * the stream, false otherwise.
429  */
430 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
431 		struct dc_stream_state *stream,
432 		uint32_t *refresh_rate)
433 {
434 	bool status = false;
435 
436 	int i = 0;
437 
438 	for (i = 0; i < MAX_PIPES; i++) {
439 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
440 
441 		if (pipe->stream == stream && pipe->stream_res.tg) {
442 			/* Only execute if a function pointer has been defined for
443 			 * the DC version in question
444 			 */
445 			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
446 				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
447 
448 				status = true;
449 
450 				break;
451 			}
452 		}
453 	}
454 
455 	return status;
456 }
457 
458 bool dc_stream_get_crtc_position(struct dc *dc,
459 		struct dc_stream_state **streams, int num_streams,
460 		unsigned int *v_pos, unsigned int *nom_v_pos)
461 {
462 	/* TODO: Support multiple streams */
463 	const struct dc_stream_state *stream = streams[0];
464 	int i;
465 	bool ret = false;
466 	struct crtc_position position;
467 
468 	for (i = 0; i < MAX_PIPES; i++) {
469 		struct pipe_ctx *pipe =
470 				&dc->current_state->res_ctx.pipe_ctx[i];
471 
472 		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
473 			dc->hwss.get_position(&pipe, 1, &position);
474 
475 			*v_pos = position.vertical_count;
476 			*nom_v_pos = position.nominal_vcount;
477 			ret = true;
478 		}
479 	}
480 	return ret;
481 }
482 
483 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
484 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
485 			     struct crc_params *crc_window)
486 {
487 	int i;
488 	struct dmcu *dmcu = dc->res_pool->dmcu;
489 	struct pipe_ctx *pipe;
490 	struct crc_region tmp_win, *crc_win;
491 	struct otg_phy_mux mapping_tmp, *mux_mapping;
492 
493 	/*crc window can't be null*/
494 	if (!crc_window)
495 		return false;
496 
497 	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
498 		crc_win = &tmp_win;
499 		mux_mapping = &mapping_tmp;
500 		/*set crc window*/
501 		tmp_win.x_start = crc_window->windowa_x_start;
502 		tmp_win.y_start = crc_window->windowa_y_start;
503 		tmp_win.x_end = crc_window->windowa_x_end;
504 		tmp_win.y_end = crc_window->windowa_y_end;
505 
506 		for (i = 0; i < MAX_PIPES; i++) {
507 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
508 			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
509 				break;
510 		}
511 
512 		/* Stream not found */
513 		if (i == MAX_PIPES)
514 			return false;
515 
516 
517 		/*set mux routing info*/
518 		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
519 		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
520 
521 		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
522 	} else {
523 		DC_LOG_DC("dmcu is not initialized");
524 		return false;
525 	}
526 
527 	return true;
528 }
529 
530 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
531 {
532 	int i;
533 	struct dmcu *dmcu = dc->res_pool->dmcu;
534 	struct pipe_ctx *pipe;
535 	struct otg_phy_mux mapping_tmp, *mux_mapping;
536 
537 	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
538 		mux_mapping = &mapping_tmp;
539 
540 		for (i = 0; i < MAX_PIPES; i++) {
541 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
542 			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
543 				break;
544 		}
545 
546 		/* Stream not found */
547 		if (i == MAX_PIPES)
548 			return false;
549 
550 
551 		/*set mux routing info*/
552 		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
553 		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
554 
555 		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
556 	} else {
557 		DC_LOG_DC("dmcu is not initialized");
558 		return false;
559 	}
560 
561 	return true;
562 }
563 #endif
564 
565 /**
566  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
567  * @dc: DC Object
568  * @stream: The stream to configure CRC on.
569  * @crc_window: CRC window (x/y start/end) information
570  * @enable: Enable CRC if true, disable otherwise.
571  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
572  *              once.
573  *
574  * By default, only CRC0 is configured, and the entire frame is used to
575  * calculate the CRC.
576  */
577 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
578 			     struct crc_params *crc_window, bool enable, bool continuous)
579 {
580 	int i;
581 	struct pipe_ctx *pipe;
582 	struct crc_params param;
583 	struct timing_generator *tg;
584 
585 	for (i = 0; i < MAX_PIPES; i++) {
586 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
587 		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
588 			break;
589 	}
590 	/* Stream not found */
591 	if (i == MAX_PIPES)
592 		return false;
593 
594 	/* By default, capture the full frame */
595 	param.windowa_x_start = 0;
596 	param.windowa_y_start = 0;
597 	param.windowa_x_end = pipe->stream->timing.h_addressable;
598 	param.windowa_y_end = pipe->stream->timing.v_addressable;
599 	param.windowb_x_start = 0;
600 	param.windowb_y_start = 0;
601 	param.windowb_x_end = pipe->stream->timing.h_addressable;
602 	param.windowb_y_end = pipe->stream->timing.v_addressable;
603 
604 	if (crc_window) {
605 		param.windowa_x_start = crc_window->windowa_x_start;
606 		param.windowa_y_start = crc_window->windowa_y_start;
607 		param.windowa_x_end = crc_window->windowa_x_end;
608 		param.windowa_y_end = crc_window->windowa_y_end;
609 		param.windowb_x_start = crc_window->windowb_x_start;
610 		param.windowb_y_start = crc_window->windowb_y_start;
611 		param.windowb_x_end = crc_window->windowb_x_end;
612 		param.windowb_y_end = crc_window->windowb_y_end;
613 	}
614 
615 	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
616 	param.odm_mode = pipe->next_odm_pipe ? 1:0;
617 
618 	/* Default to the union of both windows */
619 	param.selection = UNION_WINDOW_A_B;
620 	param.continuous_mode = continuous;
621 	param.enable = enable;
622 
623 	tg = pipe->stream_res.tg;
624 
625 	/* Only call if supported */
626 	if (tg->funcs->configure_crc)
627 		return tg->funcs->configure_crc(tg, &param);
628 	DC_LOG_WARNING("CRC capture not supported.");
629 	return false;
630 }
631 
632 /**
633  * dc_stream_get_crc() - Get CRC values for the given stream.
634  * @dc: DC object
635  * @stream: The DC stream state of the stream to get CRCs from.
636  * @r_cr: CRC value for the first of the 3 channels stored here.
637  * @g_y:  CRC value for the second of the 3 channels stored here.
638  * @b_cb: CRC value for the third of the 3 channels stored here.
639  *
640  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
641  * Return: false if the stream is not found, or if CRCs are not enabled.
642  */
643 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
644 		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
645 {
646 	int i;
647 	struct pipe_ctx *pipe;
648 	struct timing_generator *tg;
649 
650 	for (i = 0; i < MAX_PIPES; i++) {
651 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
652 		if (pipe->stream == stream)
653 			break;
654 	}
655 	/* Stream not found */
656 	if (i == MAX_PIPES)
657 		return false;
658 
659 	tg = pipe->stream_res.tg;
660 
661 	if (tg->funcs->get_crc)
662 		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
663 	DC_LOG_WARNING("CRC capture not supported.");
664 	return false;
665 }
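
/*
 * Example: a hypothetical full-frame CRC capture using the two helpers
 * above. A NULL crc_window keeps the default window (the entire
 * addressable frame); the readback typically happens at least one frame
 * later, e.g. from a vblank worker in the dm layer.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	dc_stream_configure_crc(dc, stream, NULL, true, true);
 *	// ...wait for one or more frames...
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		; // r_cr/g_y/b_cb now hold the per-channel CRCs
 */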
666 
667 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
668 		enum dc_dynamic_expansion option)
669 {
670 	/* OPP FMT dyn expansion updates*/
671 	int i;
672 	struct pipe_ctx *pipe_ctx;
673 
674 	for (i = 0; i < MAX_PIPES; i++) {
675 		if (dc->current_state->res_ctx.pipe_ctx[i].stream
676 				== stream) {
677 			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
678 			pipe_ctx->stream_res.opp->dyn_expansion = option;
679 			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
680 					pipe_ctx->stream_res.opp,
681 					COLOR_SPACE_YCBCR601,
682 					stream->timing.display_color_depth,
683 					stream->signal);
684 		}
685 	}
686 }
687 
688 void dc_stream_set_dither_option(struct dc_stream_state *stream,
689 		enum dc_dither_option option)
690 {
691 	struct bit_depth_reduction_params params;
692 	struct dc_link *link = stream->link;
693 	struct pipe_ctx *pipes = NULL;
694 	int i;
695 
696 	for (i = 0; i < MAX_PIPES; i++) {
697 		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
698 				stream) {
699 			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
700 			break;
701 		}
702 	}
703 
704 	if (!pipes)
705 		return;
706 	if (option > DITHER_OPTION_MAX)
707 		return;
708 
709 	stream->dither_option = option;
710 
711 	memset(&params, 0, sizeof(params));
712 	resource_build_bit_depth_reduction_params(stream, &params);
713 	stream->bit_depth_params = params;
714 
715 	if (pipes->plane_res.xfm &&
716 	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
717 		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
718 			pipes->plane_res.xfm,
719 			pipes->plane_res.scl_data.lb_params.depth,
720 			&stream->bit_depth_params);
721 	}
722 
723 	pipes->stream_res.opp->funcs->
724 		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
725 }
726 
727 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
728 {
729 	int i;
730 	bool ret = false;
731 	struct pipe_ctx *pipes;
732 
733 	for (i = 0; i < MAX_PIPES; i++) {
734 		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
735 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
736 			dc->hwss.program_gamut_remap(pipes);
737 			ret = true;
738 		}
739 	}
740 
741 	return ret;
742 }
743 
744 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
745 {
746 	int i;
747 	bool ret = false;
748 	struct pipe_ctx *pipes;
749 
750 	for (i = 0; i < MAX_PIPES; i++) {
751 		if (dc->current_state->res_ctx.pipe_ctx[i].stream
752 				== stream) {
753 
754 			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
755 			dc->hwss.program_output_csc(dc,
756 					pipes,
757 					stream->output_color_space,
758 					stream->csc_color_matrix.matrix,
759 					pipes->stream_res.opp->inst);
760 			ret = true;
761 		}
762 	}
763 
764 	return ret;
765 }
766 
767 void dc_stream_set_static_screen_params(struct dc *dc,
768 		struct dc_stream_state **streams,
769 		int num_streams,
770 		const struct dc_static_screen_params *params)
771 {
772 	int i, j;
773 	struct pipe_ctx *pipes_affected[MAX_PIPES];
774 	int num_pipes_affected = 0;
775 
776 	for (i = 0; i < num_streams; i++) {
777 		struct dc_stream_state *stream = streams[i];
778 
779 		for (j = 0; j < MAX_PIPES; j++) {
780 			if (dc->current_state->res_ctx.pipe_ctx[j].stream
781 					== stream) {
782 				pipes_affected[num_pipes_affected++] =
783 						&dc->current_state->res_ctx.pipe_ctx[j];
784 			}
785 		}
786 	}
787 
788 	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
789 }
790 
791 static void dc_destruct(struct dc *dc)
792 {
793 	// reset link encoder assignment table on destruct
794 	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
795 		link_enc_cfg_init(dc, dc->current_state);
796 
797 	if (dc->current_state) {
798 		dc_release_state(dc->current_state);
799 		dc->current_state = NULL;
800 	}
801 
802 	destroy_links(dc);
803 
804 	destroy_link_encoders(dc);
805 
806 	if (dc->clk_mgr) {
807 		dc_destroy_clk_mgr(dc->clk_mgr);
808 		dc->clk_mgr = NULL;
809 	}
810 
811 	dc_destroy_resource_pool(dc);
812 
813 	if (dc->ctx->gpio_service)
814 		dal_gpio_service_destroy(&dc->ctx->gpio_service);
815 
816 	if (dc->ctx->created_bios)
817 		dal_bios_parser_destroy(&dc->ctx->dc_bios);
818 
819 	dc_perf_trace_destroy(&dc->ctx->perf_trace);
820 
821 	kfree(dc->ctx);
822 	dc->ctx = NULL;
823 
824 	kfree(dc->bw_vbios);
825 	dc->bw_vbios = NULL;
826 
827 	kfree(dc->bw_dceip);
828 	dc->bw_dceip = NULL;
829 
830 #ifdef CONFIG_DRM_AMD_DC_DCN
831 	kfree(dc->dcn_soc);
832 	dc->dcn_soc = NULL;
833 
834 	kfree(dc->dcn_ip);
835 	dc->dcn_ip = NULL;
836 
837 #endif
838 	kfree(dc->vm_helper);
839 	dc->vm_helper = NULL;
840 
841 }
842 
843 static bool dc_construct_ctx(struct dc *dc,
844 		const struct dc_init_data *init_params)
845 {
846 	struct dc_context *dc_ctx;
847 	enum dce_version dc_version = DCE_VERSION_UNKNOWN;
848 
849 	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
850 	if (!dc_ctx)
851 		return false;
852 
853 	dc_ctx->cgs_device = init_params->cgs_device;
854 	dc_ctx->driver_context = init_params->driver;
855 	dc_ctx->dc = dc;
856 	dc_ctx->asic_id = init_params->asic_id;
857 	dc_ctx->dc_sink_id_count = 0;
858 	dc_ctx->dc_stream_id_count = 0;
859 	dc_ctx->dce_environment = init_params->dce_environment;
860 
861 	/* Create logger */
862 
863 	dc_version = resource_parse_asic_id(init_params->asic_id);
864 	dc_ctx->dce_version = dc_version;
865 
866 	dc_ctx->perf_trace = dc_perf_trace_create();
867 	if (!dc_ctx->perf_trace) {
868 		ASSERT_CRITICAL(false);
869 		return false;
870 	}
871 
872 	dc->ctx = dc_ctx;
873 
874 	return true;
875 }
876 
877 static bool dc_construct(struct dc *dc,
878 		const struct dc_init_data *init_params)
879 {
880 	struct dc_context *dc_ctx;
881 	struct bw_calcs_dceip *dc_dceip;
882 	struct bw_calcs_vbios *dc_vbios;
883 #ifdef CONFIG_DRM_AMD_DC_DCN
884 	struct dcn_soc_bounding_box *dcn_soc;
885 	struct dcn_ip_params *dcn_ip;
886 #endif
887 
888 	dc->config = init_params->flags;
889 
890 	// Allocate memory for the vm_helper
891 	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
892 	if (!dc->vm_helper) {
893 		dm_error("%s: failed to create dc->vm_helper\n", __func__);
894 		goto fail;
895 	}
896 
897 	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
898 
899 	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
900 	if (!dc_dceip) {
901 		dm_error("%s: failed to create dceip\n", __func__);
902 		goto fail;
903 	}
904 
905 	dc->bw_dceip = dc_dceip;
906 
907 	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
908 	if (!dc_vbios) {
909 		dm_error("%s: failed to create vbios\n", __func__);
910 		goto fail;
911 	}
912 
913 	dc->bw_vbios = dc_vbios;
914 #ifdef CONFIG_DRM_AMD_DC_DCN
915 	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
916 	if (!dcn_soc) {
917 		dm_error("%s: failed to create dcn_soc\n", __func__);
918 		goto fail;
919 	}
920 
921 	dc->dcn_soc = dcn_soc;
922 
923 	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
924 	if (!dcn_ip) {
925 		dm_error("%s: failed to create dcn_ip\n", __func__);
926 		goto fail;
927 	}
928 
929 	dc->dcn_ip = dcn_ip;
930 #endif
931 
932 	if (!dc_construct_ctx(dc, init_params)) {
933 		dm_error("%s: failed to create ctx\n", __func__);
934 		goto fail;
935 	}
936 
937 	dc_ctx = dc->ctx;
938 
939 	/* Resource should construct all asic specific resources.
940 	 * This should be the only place where we need to parse the asic id
941 	 */
942 	if (init_params->vbios_override)
943 		dc_ctx->dc_bios = init_params->vbios_override;
944 	else {
945 		/* Create BIOS parser */
946 		struct bp_init_data bp_init_data;
947 
948 		bp_init_data.ctx = dc_ctx;
949 		bp_init_data.bios = init_params->asic_id.atombios_base_address;
950 
951 		dc_ctx->dc_bios = dal_bios_parser_create(
952 				&bp_init_data, dc_ctx->dce_version);
953 
954 		if (!dc_ctx->dc_bios) {
955 			ASSERT_CRITICAL(false);
956 			goto fail;
957 		}
958 
959 		dc_ctx->created_bios = true;
960 	}
961 
962 	dc->vendor_signature = init_params->vendor_signature;
963 
964 	/* Create GPIO service */
965 	dc_ctx->gpio_service = dal_gpio_service_create(
966 			dc_ctx->dce_version,
967 			dc_ctx->dce_environment,
968 			dc_ctx);
969 
970 	if (!dc_ctx->gpio_service) {
971 		ASSERT_CRITICAL(false);
972 		goto fail;
973 	}
974 
975 	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
976 	if (!dc->res_pool)
977 		goto fail;
978 
979 	/* set i2c speed if not done by the respective dcnxxx__resource.c */
980 	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
981 		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
982 
983 	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
984 	if (!dc->clk_mgr)
985 		goto fail;
986 #ifdef CONFIG_DRM_AMD_DC_DCN
987 	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
988 #endif
989 
990 	if (dc->res_pool->funcs->update_bw_bounding_box)
991 		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
992 
993 	/* Creation of current_state must occur after dc->dml
994 	 * is initialized in dc_create_resource_pool because
995 	 * on creation it copies the contents of dc->dml
996 	 */
997 
998 	dc->current_state = dc_create_state(dc);
999 
1000 	if (!dc->current_state) {
1001 		dm_error("%s: failed to create validate ctx\n", __func__);
1002 		goto fail;
1003 	}
1004 
1005 	if (!create_links(dc, init_params->num_virtual_links))
1006 		goto fail;
1007 
1008 	/* Create additional DIG link encoder objects if fewer than the platform
1009 	 * supports were created during link construction.
1010 	 */
1011 	if (!create_link_encoders(dc))
1012 		goto fail;
1013 
1014 	dc_resource_state_construct(dc, dc->current_state);
1015 
1016 	return true;
1017 
1018 fail:
1019 	return false;
1020 }
1021 
1022 static void disable_all_writeback_pipes_for_stream(
1023 		const struct dc *dc,
1024 		struct dc_stream_state *stream,
1025 		struct dc_state *context)
1026 {
1027 	int i;
1028 
1029 	for (i = 0; i < stream->num_wb_info; i++)
1030 		stream->writeback_info[i].wb_enabled = false;
1031 }
1032 
1033 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
1034 					  struct dc_stream_state *stream, bool lock)
1035 {
1036 	int i;
1037 
1038 	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1039 	if (dc->hwss.interdependent_update_lock)
1040 		dc->hwss.interdependent_update_lock(dc, context, lock);
1041 	else {
1042 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1043 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1044 			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1045 
1046 			// Copied conditions that were previously in dce110_apply_ctx_for_surface
1047 			if (stream == pipe_ctx->stream) {
1048 				if (!pipe_ctx->top_pipe &&
1049 					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1050 					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1051 			}
1052 		}
1053 	}
1054 }
1055 
1056 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1057 {
1058 	int i, j;
1059 	struct dc_state *dangling_context = dc_create_state(dc);
1060 	struct dc_state *current_ctx;
1061 
1062 	if (dangling_context == NULL)
1063 		return;
1064 
1065 	dc_resource_state_copy_construct(dc->current_state, dangling_context);
1066 
1067 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1068 		struct dc_stream_state *old_stream =
1069 				dc->current_state->res_ctx.pipe_ctx[i].stream;
1070 		bool should_disable = true;
1071 		bool pipe_split_change =
1072 			context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1073 
1074 		for (j = 0; j < context->stream_count; j++) {
1075 			if (old_stream == context->streams[j]) {
1076 				should_disable = false;
1077 				break;
1078 			}
1079 		}
1080 		if (!should_disable && pipe_split_change)
1081 			should_disable = true;
1082 
1083 		if (should_disable && old_stream) {
1084 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1085 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1086 
1087 			if (dc->hwss.apply_ctx_for_surface) {
1088 				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1089 				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1090 				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1091 				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1092 			}
1093 			if (dc->hwss.program_front_end_for_ctx) {
1094 				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1095 				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1096 				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1097 				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1098 			}
1099 		}
1100 	}
1101 
1102 	current_ctx = dc->current_state;
1103 	dc->current_state = dangling_context;
1104 	dc_release_state(current_ctx);
1105 }
1106 
1107 static void disable_vbios_mode_if_required(
1108 		struct dc *dc,
1109 		struct dc_state *context)
1110 {
1111 	unsigned int i, j;
1112 
1113 	/* if the timing changed, disable the stream */
1114 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1115 		struct dc_stream_state *stream = NULL;
1116 		struct dc_link *link = NULL;
1117 		struct pipe_ctx *pipe = NULL;
1118 
1119 		pipe = &context->res_ctx.pipe_ctx[i];
1120 		stream = pipe->stream;
1121 		if (stream == NULL)
1122 			continue;
1123 
1124 		// only looking for first odm pipe
1125 		if (pipe->prev_odm_pipe)
1126 			continue;
1127 
1128 		if (stream->link->local_sink &&
1129 			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1130 			link = stream->link;
1131 		}
1132 
1133 		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1134 			unsigned int enc_inst, tg_inst = 0;
1135 			unsigned int pix_clk_100hz;
1136 
1137 			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1138 			if (enc_inst != ENGINE_ID_UNKNOWN) {
1139 				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1140 					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1141 						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1142 							dc->res_pool->stream_enc[j]);
1143 						break;
1144 					}
1145 				}
1146 
1147 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1148 					dc->res_pool->dp_clock_source,
1149 					tg_inst, &pix_clk_100hz);
1150 
1151 				if (link->link_status.link_active) {
1152 					uint32_t requested_pix_clk_100hz =
1153 						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1154 
1155 					if (pix_clk_100hz != requested_pix_clk_100hz) {
1156 						core_link_disable_stream(pipe);
1157 						pipe->stream->dpms_off = false;
1158 					}
1159 				}
1160 			}
1161 		}
1162 	}
1163 }
1164 
1165 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1166 {
1167 	int i;
1168 	PERF_TRACE();
1169 	for (i = 0; i < MAX_PIPES; i++) {
1170 		int count = 0;
1171 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1172 
1173 		if (!pipe->plane_state)
1174 			continue;
1175 
1176 		/* Timeout 100 ms */
1177 		while (count < 100000) {
1178 			/* Must set to false to start with, due to OR in update function */
1179 			pipe->plane_state->status.is_flip_pending = false;
1180 			dc->hwss.update_pending_status(pipe);
1181 			if (!pipe->plane_state->status.is_flip_pending)
1182 				break;
1183 			udelay(1);
1184 			count++;
1185 		}
1186 		ASSERT(!pipe->plane_state->status.is_flip_pending);
1187 	}
1188 	PERF_TRACE();
1189 }
1190 
1191 /*******************************************************************************
1192  * Public functions
1193  ******************************************************************************/
1194 
1195 struct dc *dc_create(const struct dc_init_data *init_params)
1196 {
1197 	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1198 	unsigned int full_pipe_count;
1199 
1200 	if (!dc)
1201 		return NULL;
1202 
1203 	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1204 		if (!dc_construct_ctx(dc, init_params))
1205 			goto destruct_dc;
1206 	} else {
1207 		if (!dc_construct(dc, init_params))
1208 			goto destruct_dc;
1209 
1210 		full_pipe_count = dc->res_pool->pipe_count;
1211 		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1212 			full_pipe_count--;
1213 		dc->caps.max_streams = min(
1214 				full_pipe_count,
1215 				dc->res_pool->stream_enc_count);
1216 
1217 		dc->caps.max_links = dc->link_count;
1218 		dc->caps.max_audios = dc->res_pool->audio_count;
1219 		dc->caps.linear_pitch_alignment = 64;
1220 
1221 		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1222 
1223 		if (dc->res_pool->dmcu != NULL)
1224 			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1225 	}
1226 
1227 	/* Populate versioning information */
1228 	dc->versions.dc_ver = DC_VER;
1229 
1230 	dc->build_id = DC_BUILD_ID;
1231 
1232 	DC_LOG_DC("Display Core initialized\n");
1233 
1234 
1235 
1236 	return dc;
1237 
1238 destruct_dc:
1239 	dc_destruct(dc);
1240 	kfree(dc);
1241 	return NULL;
1242 }
1243 
1244 static void detect_edp_presence(struct dc *dc)
1245 {
1246 	struct dc_link *edp_links[MAX_NUM_EDP];
1247 	struct dc_link *edp_link = NULL;
1248 	enum dc_connection_type type;
1249 	int i;
1250 	int edp_num;
1251 
1252 	get_edp_links(dc, edp_links, &edp_num);
1253 	if (!edp_num)
1254 		return;
1255 
1256 	for (i = 0; i < edp_num; i++) {
1257 		edp_link = edp_links[i];
1258 		if (dc->config.edp_not_connected) {
1259 			edp_link->edp_sink_present = false;
1260 		} else {
1261 			dc_link_detect_sink(edp_link, &type);
1262 			edp_link->edp_sink_present = (type != dc_connection_none);
1263 		}
1264 	}
1265 }
1266 
1267 void dc_hardware_init(struct dc *dc)
1268 {
1269 
1270 	detect_edp_presence(dc);
1271 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1272 		dc->hwss.init_hw(dc);
1273 }
1274 
1275 void dc_init_callbacks(struct dc *dc,
1276 		const struct dc_callback_init *init_params)
1277 {
1278 #ifdef CONFIG_DRM_AMD_DC_HDCP
1279 	dc->ctx->cp_psp = init_params->cp_psp;
1280 #endif
1281 }
1282 
1283 void dc_deinit_callbacks(struct dc *dc)
1284 {
1285 #ifdef CONFIG_DRM_AMD_DC_HDCP
1286 	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1287 #endif
1288 }
1289 
1290 void dc_destroy(struct dc **dc)
1291 {
1292 	dc_destruct(*dc);
1293 	kfree(*dc);
1294 	*dc = NULL;
1295 }
1296 
1297 static void enable_timing_multisync(
1298 		struct dc *dc,
1299 		struct dc_state *ctx)
1300 {
1301 	int i, multisync_count = 0;
1302 	int pipe_count = dc->res_pool->pipe_count;
1303 	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1304 
1305 	for (i = 0; i < pipe_count; i++) {
1306 		if (!ctx->res_ctx.pipe_ctx[i].stream ||
1307 				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1308 			continue;
1309 		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1310 			continue;
1311 		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1312 		multisync_count++;
1313 	}
1314 
1315 	if (multisync_count > 0) {
1316 		dc->hwss.enable_per_frame_crtc_position_reset(
1317 			dc, multisync_count, multisync_pipes);
1318 	}
1319 }
1320 
1321 static void program_timing_sync(
1322 		struct dc *dc,
1323 		struct dc_state *ctx)
1324 {
1325 	int i, j, k;
1326 	int group_index = 0;
1327 	int num_group = 0;
1328 	int pipe_count = dc->res_pool->pipe_count;
1329 	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1330 
1331 	for (i = 0; i < pipe_count; i++) {
1332 		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
1333 			continue;
1334 
1335 		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1336 	}
1337 
1338 	for (i = 0; i < pipe_count; i++) {
1339 		int group_size = 1;
1340 		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1341 		struct pipe_ctx *pipe_set[MAX_PIPES];
1342 
1343 		if (!unsynced_pipes[i])
1344 			continue;
1345 
1346 		pipe_set[0] = unsynced_pipes[i];
1347 		unsynced_pipes[i] = NULL;
1348 
1349 		/* Add tg to the set, search rest of the tg's for ones with
1350 		 * same timing, add all tgs with same timing to the group
1351 		 */
1352 		for (j = i + 1; j < pipe_count; j++) {
1353 			if (!unsynced_pipes[j])
1354 				continue;
1355 			if (sync_type != TIMING_SYNCHRONIZABLE &&
1356 				dc->hwss.enable_vblanks_synchronization &&
1357 				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1358 				resource_are_vblanks_synchronizable(
1359 					unsynced_pipes[j]->stream,
1360 					pipe_set[0]->stream)) {
1361 				sync_type = VBLANK_SYNCHRONIZABLE;
1362 				pipe_set[group_size] = unsynced_pipes[j];
1363 				unsynced_pipes[j] = NULL;
1364 				group_size++;
1365 			} else
1366 			if (sync_type != VBLANK_SYNCHRONIZABLE &&
1367 				resource_are_streams_timing_synchronizable(
1368 					unsynced_pipes[j]->stream,
1369 					pipe_set[0]->stream)) {
1370 				sync_type = TIMING_SYNCHRONIZABLE;
1371 				pipe_set[group_size] = unsynced_pipes[j];
1372 				unsynced_pipes[j] = NULL;
1373 				group_size++;
1374 			}
1375 		}
1376 
1377 		/* set first unblanked pipe as master */
1378 		for (j = 0; j < group_size; j++) {
1379 			bool is_blanked;
1380 
1381 			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1382 				is_blanked =
1383 					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1384 			else
1385 				is_blanked =
1386 					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1387 			if (!is_blanked) {
1388 				if (j == 0)
1389 					break;
1390 
1391 				swap(pipe_set[0], pipe_set[j]);
1392 				break;
1393 			}
1394 		}
1395 
1396 		for (k = 0; k < group_size; k++) {
1397 			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1398 
1399 			status->timing_sync_info.group_id = num_group;
1400 			status->timing_sync_info.group_size = group_size;
1401 			if (k == 0)
1402 				status->timing_sync_info.master = true;
1403 			else
1404 				status->timing_sync_info.master = false;
1405 
1406 		}
1407 		/* remove any other unblanked pipes as they have already been synced */
1408 		for (j = j + 1; j < group_size; j++) {
1409 			bool is_blanked;
1410 
1411 			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1412 				is_blanked =
1413 					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1414 			else
1415 				is_blanked =
1416 					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1417 			if (!is_blanked) {
1418 				group_size--;
1419 				pipe_set[j] = pipe_set[group_size];
1420 				j--;
1421 			}
1422 		}
1423 
1424 		if (group_size > 1) {
1425 			if (sync_type == TIMING_SYNCHRONIZABLE) {
1426 				dc->hwss.enable_timing_synchronization(
1427 					dc, group_index, group_size, pipe_set);
1428 			} else
1429 				if (sync_type == VBLANK_SYNCHRONIZABLE) {
1430 				dc->hwss.enable_vblanks_synchronization(
1431 					dc, group_index, group_size, pipe_set);
1432 				}
1433 			group_index++;
1434 		}
1435 		num_group++;
1436 	}
1437 }
1438 
1439 static bool context_changed(
1440 		struct dc *dc,
1441 		struct dc_state *context)
1442 {
1443 	uint8_t i;
1444 
1445 	if (context->stream_count != dc->current_state->stream_count)
1446 		return true;
1447 
1448 	for (i = 0; i < dc->current_state->stream_count; i++) {
1449 		if (dc->current_state->streams[i] != context->streams[i])
1450 			return true;
1451 	}
1452 
1453 	return false;
1454 }
1455 
1456 bool dc_validate_seamless_boot_timing(const struct dc *dc,
1457 				const struct dc_sink *sink,
1458 				struct dc_crtc_timing *crtc_timing)
1459 {
1460 	struct timing_generator *tg;
1461 	struct stream_encoder *se = NULL;
1462 
1463 	struct dc_crtc_timing hw_crtc_timing = {0};
1464 
1465 	struct dc_link *link = sink->link;
1466 	unsigned int i, enc_inst, tg_inst = 0;
1467 
1468 	/* Support seamless boot on EDP displays only */
1469 	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1470 		return false;
1471 	}
1472 
1473 	/* Check for enabled DIG to identify enabled display */
1474 	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1475 		return false;
1476 
1477 	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1478 
1479 	if (enc_inst == ENGINE_ID_UNKNOWN)
1480 		return false;
1481 
1482 	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1483 		if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1484 
1485 			se = dc->res_pool->stream_enc[i];
1486 
1487 			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1488 				dc->res_pool->stream_enc[i]);
1489 			break;
1490 		}
1491 	}
1492 
1493 	// tg_inst not found
1494 	if (i == dc->res_pool->stream_enc_count)
1495 		return false;
1496 
1497 	if (tg_inst >= dc->res_pool->timing_generator_count)
1498 		return false;
1499 
1500 	tg = dc->res_pool->timing_generators[tg_inst];
1501 
1502 	if (!tg->funcs->get_hw_timing)
1503 		return false;
1504 
1505 	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1506 		return false;
1507 
1508 	if (crtc_timing->h_total != hw_crtc_timing.h_total)
1509 		return false;
1510 
1511 	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1512 		return false;
1513 
1514 	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1515 		return false;
1516 
1517 	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1518 		return false;
1519 
1520 	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1521 		return false;
1522 
1523 	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1524 		return false;
1525 
1526 	if (crtc_timing->v_total != hw_crtc_timing.v_total)
1527 		return false;
1528 
1529 	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1530 		return false;
1531 
1532 	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1533 		return false;
1534 
1535 	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1536 		return false;
1537 
1538 	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1539 		return false;
1540 
1541 	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1542 		return false;
1543 
1544 	/* block DSC for now, as VBIOS does not currently support DSC timings */
1545 	if (crtc_timing->flags.DSC)
1546 		return false;
1547 
1548 	if (dc_is_dp_signal(link->connector_signal)) {
1549 		unsigned int pix_clk_100hz;
1550 
1551 		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1552 			dc->res_pool->dp_clock_source,
1553 			tg_inst, &pix_clk_100hz);
1554 
1555 		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1556 			return false;
1557 
1558 		if (!se->funcs->dp_get_pixel_format)
1559 			return false;
1560 
1561 		if (!se->funcs->dp_get_pixel_format(
1562 			se,
1563 			&hw_crtc_timing.pixel_encoding,
1564 			&hw_crtc_timing.display_color_depth))
1565 			return false;
1566 
1567 		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1568 			return false;
1569 
1570 		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1571 			return false;
1572 	}
1573 
1574 	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1575 		return false;
1576 	}
1577 
1578 	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1579 		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1580 		return false;
1581 	}
1582 
1583 	return true;
1584 }
1585 
1586 static inline bool should_update_pipe_for_stream(
1587 		struct dc_state *context,
1588 		struct pipe_ctx *pipe_ctx,
1589 		struct dc_stream_state *stream)
1590 {
1591 	return (pipe_ctx->stream && pipe_ctx->stream == stream);
1592 }
1593 
1594 static inline bool should_update_pipe_for_plane(
1595 		struct dc_state *context,
1596 		struct pipe_ctx *pipe_ctx,
1597 		struct dc_plane_state *plane_state)
1598 {
1599 	return (pipe_ctx->plane_state == plane_state);
1600 }
1601 
1602 void dc_enable_stereo(
1603 	struct dc *dc,
1604 	struct dc_state *context,
1605 	struct dc_stream_state *streams[],
1606 	uint8_t stream_count)
1607 {
1608 	int i, j;
1609 	struct pipe_ctx *pipe;
1610 
1611 	for (i = 0; i < MAX_PIPES; i++) {
1612 		if (context != NULL) {
1613 			pipe = &context->res_ctx.pipe_ctx[i];
1614 		} else {
1615 			context = dc->current_state;
1616 			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1617 		}
1618 
1619 		for (j = 0; pipe && j < stream_count; j++)  {
1620 			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1621 				dc->hwss.setup_stereo)
1622 				dc->hwss.setup_stereo(pipe, dc);
1623 		}
1624 	}
1625 }
1626 
1627 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1628 {
1629 	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1630 		enable_timing_multisync(dc, context);
1631 		program_timing_sync(dc, context);
1632 	}
1633 }
1634 
1635 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1636 {
1637 	int i;
1638 	unsigned int stream_mask = 0;
1639 
1640 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1641 		if (context->res_ctx.pipe_ctx[i].stream)
1642 			stream_mask |= 1 << i;
1643 	}
1644 
1645 	return stream_mask;
1646 }
1647 
1648 #if defined(CONFIG_DRM_AMD_DC_DCN)
1649 void dc_z10_restore(const struct dc *dc)
1650 {
1651 	if (dc->hwss.z10_restore)
1652 		dc->hwss.z10_restore(dc);
1653 }
1654 
1655 void dc_z10_save_init(struct dc *dc)
1656 {
1657 	if (dc->hwss.z10_save_init)
1658 		dc->hwss.z10_save_init(dc);
1659 }
1660 #endif
1661 /*
1662  * Applies the given context to HW and copies it into the current context.
1663  * It's up to the user to release the src context afterwards.
1664  */
1665 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1666 {
1667 	struct dc_bios *dcb = dc->ctx->dc_bios;
1668 	enum dc_status result = DC_ERROR_UNEXPECTED;
1669 	struct pipe_ctx *pipe;
1670 	int i, k, l;
1671 	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1672 
1673 #if defined(CONFIG_DRM_AMD_DC_DCN)
1674 	dc_z10_restore(dc);
1675 	dc_allow_idle_optimizations(dc, false);
1676 #endif
1677 
1678 	for (i = 0; i < context->stream_count; i++)
1679 		dc_streams[i] =  context->streams[i];
1680 
1681 	if (!dcb->funcs->is_accelerated_mode(dcb)) {
1682 		disable_vbios_mode_if_required(dc, context);
1683 		dc->hwss.enable_accelerated_mode(dc, context);
1684 	}
1685 
1686 	if (context->stream_count > get_seamless_boot_stream_count(context) ||
1687 		context->stream_count == 0)
1688 		dc->hwss.prepare_bandwidth(dc, context);
1689 
1690 	disable_dangling_plane(dc, context);
1691 	/* re-program planes for existing stream, in case we need to
1692 	 * free up plane resource for later use
1693 	 */
1694 	if (dc->hwss.apply_ctx_for_surface) {
1695 		for (i = 0; i < context->stream_count; i++) {
1696 			if (context->streams[i]->mode_changed)
1697 				continue;
1698 			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1699 			dc->hwss.apply_ctx_for_surface(
1700 				dc, context->streams[i],
1701 				context->stream_status[i].plane_count,
1702 				context); /* use new pipe config in new context */
1703 			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1704 			dc->hwss.post_unlock_program_front_end(dc, context);
1705 		}
1706 	}
1707 
1708 	/* Program hardware */
1709 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1710 		pipe = &context->res_ctx.pipe_ctx[i];
1711 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1712 	}
1713 
1714 	result = dc->hwss.apply_ctx_to_hw(dc, context);
1715 
1716 	if (result != DC_OK)
1717 		return result;
1718 
1719 	dc_trigger_sync(dc, context);
1720 
1721 	/* Program all planes within new context*/
1722 	if (dc->hwss.program_front_end_for_ctx) {
1723 		dc->hwss.interdependent_update_lock(dc, context, true);
1724 		dc->hwss.program_front_end_for_ctx(dc, context);
1725 		dc->hwss.interdependent_update_lock(dc, context, false);
1726 		dc->hwss.post_unlock_program_front_end(dc, context);
1727 	}
1728 	for (i = 0; i < context->stream_count; i++) {
1729 		const struct dc_link *link = context->streams[i]->link;
1730 
1731 		if (!context->streams[i]->mode_changed)
1732 			continue;
1733 
1734 		if (dc->hwss.apply_ctx_for_surface) {
1735 			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1736 			dc->hwss.apply_ctx_for_surface(
1737 					dc, context->streams[i],
1738 					context->stream_status[i].plane_count,
1739 					context);
1740 			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1741 			dc->hwss.post_unlock_program_front_end(dc, context);
1742 		}
1743 
1744 		/*
1745 		 * enable stereo
1746 		 * TODO rework dc_enable_stereo call to work with validation sets?
1747 		 */
1748 		for (k = 0; k < MAX_PIPES; k++) {
1749 			pipe = &context->res_ctx.pipe_ctx[k];
1750 
1751 			for (l = 0 ; pipe && l < context->stream_count; l++)  {
1752 				if (context->streams[l] &&
1753 					context->streams[l] == pipe->stream &&
1754 					dc->hwss.setup_stereo)
1755 					dc->hwss.setup_stereo(pipe, dc);
1756 			}
1757 		}
1758 
1759 		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1760 				context->streams[i]->timing.h_addressable,
1761 				context->streams[i]->timing.v_addressable,
1762 				context->streams[i]->timing.h_total,
1763 				context->streams[i]->timing.v_total,
1764 				context->streams[i]->timing.pix_clk_100hz / 10);
1765 	}
1766 
1767 	dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1768 
1769 	if (context->stream_count > get_seamless_boot_stream_count(context) ||
1770 		context->stream_count == 0) {
1771 		/* Must wait for no flips to be pending before doing optimize bw */
1772 		wait_for_no_pipes_pending(dc, context);
1773 		/* pplib is notified if disp_num changed */
1774 		dc->hwss.optimize_bandwidth(dc, context);
1775 	}
1776 
1777 	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1778 		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1779 	else
1780 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1781 
1782 	context->stream_mask = get_stream_mask(dc, context);
1783 
1784 	if (context->stream_mask != dc->current_state->stream_mask)
1785 		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1786 
1787 	for (i = 0; i < context->stream_count; i++)
1788 		context->streams[i]->mode_changed = false;
1789 
1790 	dc_release_state(dc->current_state);
1791 
1792 	dc->current_state = context;
1793 
1794 	dc_retain_state(dc->current_state);
1795 
1796 	return result;
1797 }
1798 
1799 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1800 {
1801 	enum dc_status result = DC_ERROR_UNEXPECTED;
1802 	int i;
1803 
1804 	if (!context_changed(dc, context))
1805 		return DC_OK;
1806 
1807 	DC_LOG_DC("%s: %d streams\n",
1808 				__func__, context->stream_count);
1809 
1810 	for (i = 0; i < context->stream_count; i++) {
1811 		struct dc_stream_state *stream = context->streams[i];
1812 
1813 		dc_stream_log(dc, stream);
1814 	}
1815 
1816 	/*
1817 	 * Previous validation was performed with fast_validation = true and
1818 	 * the full DML state required for hardware programming was skipped.
1819 	 *
1820 	 * Re-validate here to calculate these parameters / watermarks.
1821 	 */
1822 	result = dc_validate_global_state(dc, context, false);
1823 	if (result != DC_OK) {
1824 		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1825 			     dc_status_to_str(result), result);
1826 		return false;
1827 	}
1828 
1829 	result = dc_commit_state_no_check(dc, context);
1830 
1831 	return (result == DC_OK);
1832 }
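/*
 * Illustrative sketch, not part of the original file: a hypothetical DM-side
 * caller builds a new dc_state, edits it, and hands it to dc_commit_state().
 * Only helpers defined in this file are used; stream/plane manipulation on
 * the new context is elided.
 *
 *	struct dc_state *new_ctx = dc_create_state(dc);
 *
 *	if (new_ctx) {
 *		dc_resource_state_copy_construct(dc->current_state, new_ctx);
 *		// ... add/remove streams and planes on new_ctx here ...
 *		if (!dc_commit_state(dc, new_ctx))
 *			DC_LOG_DC("commit rejected by global validation\n");
 *		dc_release_state(new_ctx);
 *	}
 */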
1833 
1834 #if defined(CONFIG_DRM_AMD_DC_DCN)
1835 bool dc_acquire_release_mpc_3dlut(
1836 		struct dc *dc, bool acquire,
1837 		struct dc_stream_state *stream,
1838 		struct dc_3dlut **lut,
1839 		struct dc_transfer_func **shaper)
1840 {
1841 	int pipe_idx;
1842 	bool ret = false;
1843 	bool found_pipe_idx = false;
1844 	const struct resource_pool *pool = dc->res_pool;
1845 	struct resource_context *res_ctx = &dc->current_state->res_ctx;
1846 	int mpcc_id = 0;
1847 
1848 	if (pool && res_ctx) {
1849 		if (acquire) {
1850 			/*find pipe idx for the given stream*/
1851 			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1852 				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1853 					found_pipe_idx = true;
1854 					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1855 					break;
1856 				}
1857 			}
1858 		} else
1859 			found_pipe_idx = true; /* for release, pipe_idx is not required */
1860 
1861 		if (found_pipe_idx) {
1862 			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1863 				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1864 			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1865 				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1866 		}
1867 	}
1868 	return ret;
1869 }
1870 #endif
1871 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1872 {
1873 	int i;
1874 	struct pipe_ctx *pipe;
1875 
1876 	for (i = 0; i < MAX_PIPES; i++) {
1877 		pipe = &context->res_ctx.pipe_ctx[i];
1878 
1879 		if (!pipe->plane_state)
1880 			continue;
1881 
1882 		/* Must set to false to start with, due to OR in update function */
1883 		pipe->plane_state->status.is_flip_pending = false;
1884 		dc->hwss.update_pending_status(pipe);
1885 		if (pipe->plane_state->status.is_flip_pending)
1886 			return true;
1887 	}
1888 	return false;
1889 }
1890 
1891 #ifdef CONFIG_DRM_AMD_DC_DCN
1892 /* Perform updates here which need to be deferred until next vupdate
1893  *
1894  * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
1895  * but forcing lut memory to shutdown state is immediate. This causes
1896  * single frame corruption as lut gets disabled mid-frame unless shutdown
1897  * is deferred until after entering bypass.
1898  */
1899 static void process_deferred_updates(struct dc *dc)
1900 {
1901 	int i = 0;
1902 
1903 	if (dc->debug.enable_mem_low_power.bits.cm) {
1904 		ASSERT(dc->dcn_ip->max_num_dpp);
1905 		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1906 			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1907 				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1908 	}
1909 }
1910 #endif /* CONFIG_DRM_AMD_DC_DCN */
1911 
1912 void dc_post_update_surfaces_to_stream(struct dc *dc)
1913 {
1914 	int i;
1915 	struct dc_state *context = dc->current_state;
1916 
1917 	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1918 		return;
1919 
1920 	post_surface_trace(dc);
1921 
1922 	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1923 		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1924 	else
1925 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1926 
1927 	if (is_flip_pending_in_pipes(dc, context))
1928 		return;
1929 
1930 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1931 		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1932 		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1933 			context->res_ctx.pipe_ctx[i].pipe_idx = i;
1934 			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1935 		}
1936 
1937 #ifdef CONFIG_DRM_AMD_DC_DCN
1938 	process_deferred_updates(dc);
1939 #endif
1940 
1941 	dc->hwss.optimize_bandwidth(dc, context);
1942 
1943 	dc->optimized_required = false;
1944 	dc->wm_optimized_required = false;
1945 }
1946 
1947 static void init_state(struct dc *dc, struct dc_state *context)
1948 {
1949 	/* Each context must have its own instance of VBA, and in order to
1950 	 * initialize and obtain IP and SOC, the base DML instance from DC is
1951 	 * initially copied into every context
1952 	 */
1953 #ifdef CONFIG_DRM_AMD_DC_DCN
1954 	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1955 #endif
1956 }
1957 
1958 struct dc_state *dc_create_state(struct dc *dc)
1959 {
1960 	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1961 					    GFP_KERNEL);
1962 
1963 	if (!context)
1964 		return NULL;
1965 
1966 	init_state(dc, context);
1967 
1968 	kref_init(&context->refcount);
1969 
1970 	return context;
1971 }
1972 
1973 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1974 {
1975 	int i, j;
1976 	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1977 
1978 	if (!new_ctx)
1979 		return NULL;
1980 	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1981 
1982 	for (i = 0; i < MAX_PIPES; i++) {
1983 			struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
1984 
1985 			if (cur_pipe->top_pipe)
1986 				cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
1987 
1988 			if (cur_pipe->bottom_pipe)
1989 				cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
1990 
1991 			if (cur_pipe->prev_odm_pipe)
1992 				cur_pipe->prev_odm_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
1993 
1994 			if (cur_pipe->next_odm_pipe)
1995 				cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
1996 
1997 	}
1998 
1999 	for (i = 0; i < new_ctx->stream_count; i++) {
2000 			dc_stream_retain(new_ctx->streams[i]);
2001 			for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2002 				dc_plane_state_retain(
2003 					new_ctx->stream_status[i].plane_states[j]);
2004 	}
2005 
2006 	kref_init(&new_ctx->refcount);
2007 
2008 	return new_ctx;
2009 }
2010 
2011 void dc_retain_state(struct dc_state *context)
2012 {
2013 	kref_get(&context->refcount);
2014 }
2015 
2016 static void dc_state_free(struct kref *kref)
2017 {
2018 	struct dc_state *context = container_of(kref, struct dc_state, refcount);
2019 	dc_resource_state_destruct(context);
2020 	kvfree(context);
2021 }
2022 
2023 void dc_release_state(struct dc_state *context)
2024 {
2025 	kref_put(&context->refcount, dc_state_free);
2026 }
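/*
 * Illustrative sketch of the dc_state refcount lifecycle implied by the
 * helpers above (hypothetical caller, not part of the original file):
 *
 *	struct dc_state *ctx = dc_create_state(dc);	// refcount == 1
 *
 *	if (ctx) {
 *		dc_retain_state(ctx);			// refcount == 2, second holder
 *		// ... hand ctx to that second holder ...
 *		dc_release_state(ctx);			// refcount == 1
 *		dc_release_state(ctx);			// refcount == 0 -> dc_state_free()
 *	}
 */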
2027 
2028 bool dc_set_generic_gpio_for_stereo(bool enable,
2029 		struct gpio_service *gpio_service)
2030 {
2031 	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2032 	struct gpio_pin_info pin_info;
2033 	struct gpio *generic;
2034 	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2035 			   GFP_KERNEL);
2036 
2037 	if (!config)
2038 		return false;
2039 	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2040 
2041 	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2042 		kfree(config);
2043 		return false;
2044 	} else {
2045 		generic = dal_gpio_service_create_generic_mux(
2046 			gpio_service,
2047 			pin_info.offset,
2048 			pin_info.mask);
2049 	}
2050 
2051 	if (!generic) {
2052 		kfree(config);
2053 		return false;
2054 	}
2055 
2056 	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2057 
2058 	config->enable_output_from_mux = enable;
2059 	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2060 
2061 	if (gpio_result == GPIO_RESULT_OK)
2062 		gpio_result = dal_mux_setup_config(generic, config);
2063 
2064 	if (gpio_result == GPIO_RESULT_OK) {
2065 		dal_gpio_close(generic);
2066 		dal_gpio_destroy_generic_mux(&generic);
2067 		kfree(config);
2068 		return true;
2069 	} else {
2070 		dal_gpio_close(generic);
2071 		dal_gpio_destroy_generic_mux(&generic);
2072 		kfree(config);
2073 		return false;
2074 	}
2075 }
2076 
2077 static bool is_surface_in_context(
2078 		const struct dc_state *context,
2079 		const struct dc_plane_state *plane_state)
2080 {
2081 	int j;
2082 
2083 	for (j = 0; j < MAX_PIPES; j++) {
2084 		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2085 
2086 		if (plane_state == pipe_ctx->plane_state) {
2087 			return true;
2088 		}
2089 	}
2090 
2091 	return false;
2092 }
2093 
2094 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2095 {
2096 	union surface_update_flags *update_flags = &u->surface->update_flags;
2097 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
2098 
2099 	if (!u->plane_info)
2100 		return UPDATE_TYPE_FAST;
2101 
2102 	if (u->plane_info->color_space != u->surface->color_space) {
2103 		update_flags->bits.color_space_change = 1;
2104 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2105 	}
2106 
2107 	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2108 		update_flags->bits.horizontal_mirror_change = 1;
2109 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2110 	}
2111 
2112 	if (u->plane_info->rotation != u->surface->rotation) {
2113 		update_flags->bits.rotation_change = 1;
2114 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2115 	}
2116 
2117 	if (u->plane_info->format != u->surface->format) {
2118 		update_flags->bits.pixel_format_change = 1;
2119 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2120 	}
2121 
2122 	if (u->plane_info->stereo_format != u->surface->stereo_format) {
2123 		update_flags->bits.stereo_format_change = 1;
2124 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2125 	}
2126 
2127 	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2128 		update_flags->bits.per_pixel_alpha_change = 1;
2129 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2130 	}
2131 
2132 	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2133 		update_flags->bits.global_alpha_change = 1;
2134 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2135 	}
2136 
2137 	if (u->plane_info->dcc.enable != u->surface->dcc.enable
2138 			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2139 			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2140 		/* During DCC on/off, stutter period is calculated before
2141 		 * DCC has fully transitioned. This results in incorrect
2142 		 * stutter period calculation. Triggering a full update will
2143 		 * recalculate stutter period.
2144 		 */
2145 		update_flags->bits.dcc_change = 1;
2146 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2147 	}
2148 
2149 	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2150 			resource_pixel_format_to_bpp(u->surface->format)) {
2151 		/* different bytes per element will require full bandwidth
2152 		 * and DML calculation
2153 		 */
2154 		update_flags->bits.bpp_change = 1;
2155 		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2156 	}
2157 
2158 	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2159 			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2160 		update_flags->bits.plane_size_change = 1;
2161 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2162 	}
2163 
2164 
2165 	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2166 			sizeof(union dc_tiling_info)) != 0) {
2167 		update_flags->bits.swizzle_change = 1;
2168 		elevate_update_type(&update_type, UPDATE_TYPE_MED);
2169 
2170 		/* todo: below are HW dependent, we should add a hook to
2171 		 * DCE/N resource and validate it there.
2172 		 */
2173 		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2174 			/* swizzled mode requires RQ to be setup properly,
2175 			 * thus need to run DML to calculate RQ settings
2176 			 */
2177 			update_flags->bits.bandwidth_change = 1;
2178 			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2179 		}
2180 	}
2181 
2182 	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
2183 	return update_type;
2184 }
2185 
2186 static enum surface_update_type get_scaling_info_update_type(
2187 		const struct dc_surface_update *u)
2188 {
2189 	union surface_update_flags *update_flags = &u->surface->update_flags;
2190 
2191 	if (!u->scaling_info)
2192 		return UPDATE_TYPE_FAST;
2193 
2194 	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2195 			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2196 			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2197 			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2198 			|| u->scaling_info->scaling_quality.integer_scaling !=
2199 				u->surface->scaling_quality.integer_scaling
2200 			) {
2201 		update_flags->bits.scaling_change = 1;
2202 
2203 		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2204 			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2205 				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2206 					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2207 			/* Making dst rect smaller requires a bandwidth change */
2208 			update_flags->bits.bandwidth_change = 1;
2209 	}
2210 
2211 	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2212 		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2213 
2214 		update_flags->bits.scaling_change = 1;
2215 		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2216 				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
2217 			/* Making src rect bigger requires a bandwidth change */
2218 			update_flags->bits.clock_change = 1;
2219 	}
2220 
2221 	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2222 			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
2223 			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2224 			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2225 			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2226 			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2227 		update_flags->bits.position_change = 1;
2228 
2229 	if (update_flags->bits.clock_change
2230 			|| update_flags->bits.bandwidth_change
2231 			|| update_flags->bits.scaling_change)
2232 		return UPDATE_TYPE_FULL;
2233 
2234 	if (update_flags->bits.position_change)
2235 		return UPDATE_TYPE_MED;
2236 
2237 	return UPDATE_TYPE_FAST;
2238 }
2239 
2240 static enum surface_update_type det_surface_update(const struct dc *dc,
2241 		const struct dc_surface_update *u)
2242 {
2243 	const struct dc_state *context = dc->current_state;
2244 	enum surface_update_type type;
2245 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2246 	union surface_update_flags *update_flags = &u->surface->update_flags;
2247 
2248 	if (u->flip_addr)
2249 		update_flags->bits.addr_update = 1;
2250 
2251 	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2252 		update_flags->raw = 0xFFFFFFFF;
2253 		return UPDATE_TYPE_FULL;
2254 	}
2255 
2256 	update_flags->raw = 0; // Reset all flags
2257 
2258 	type = get_plane_info_update_type(u);
2259 	elevate_update_type(&overall_type, type);
2260 
2261 	type = get_scaling_info_update_type(u);
2262 	elevate_update_type(&overall_type, type);
2263 
2264 	if (u->flip_addr)
2265 		update_flags->bits.addr_update = 1;
2266 
2267 	if (u->in_transfer_func)
2268 		update_flags->bits.in_transfer_func_change = 1;
2269 
2270 	if (u->input_csc_color_matrix)
2271 		update_flags->bits.input_csc_change = 1;
2272 
2273 	if (u->coeff_reduction_factor)
2274 		update_flags->bits.coeff_reduction_change = 1;
2275 
2276 	if (u->gamut_remap_matrix)
2277 		update_flags->bits.gamut_remap_change = 1;
2278 
2279 	if (u->gamma) {
2280 		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2281 
2282 		if (u->plane_info)
2283 			format = u->plane_info->format;
2284 		else if (u->surface)
2285 			format = u->surface->format;
2286 
2287 		if (dce_use_lut(format))
2288 			update_flags->bits.gamma_change = 1;
2289 	}
2290 
2291 	if (u->lut3d_func || u->func_shaper)
2292 		update_flags->bits.lut_3d = 1;
2293 
2294 	if (u->hdr_mult.value)
2295 		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2296 			update_flags->bits.hdr_mult = 1;
2297 			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2298 		}
2299 
2300 	if (update_flags->bits.in_transfer_func_change) {
2301 		type = UPDATE_TYPE_MED;
2302 		elevate_update_type(&overall_type, type);
2303 	}
2304 
2305 	if (update_flags->bits.input_csc_change
2306 			|| update_flags->bits.coeff_reduction_change
2307 			|| update_flags->bits.lut_3d
2308 			|| update_flags->bits.gamma_change
2309 			|| update_flags->bits.gamut_remap_change) {
2310 		type = UPDATE_TYPE_FULL;
2311 		elevate_update_type(&overall_type, type);
2312 	}
2313 
2314 	return overall_type;
2315 }
2316 
2317 static enum surface_update_type check_update_surfaces_for_stream(
2318 		struct dc *dc,
2319 		struct dc_surface_update *updates,
2320 		int surface_count,
2321 		struct dc_stream_update *stream_update,
2322 		const struct dc_stream_status *stream_status)
2323 {
2324 	int i;
2325 	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2326 
2327 #if defined(CONFIG_DRM_AMD_DC_DCN)
2328 	if (dc->idle_optimizations_allowed)
2329 		overall_type = UPDATE_TYPE_FULL;
2330 
2331 #endif
2332 	if (stream_status == NULL || stream_status->plane_count != surface_count)
2333 		overall_type = UPDATE_TYPE_FULL;
2334 
2335 	if (stream_update && stream_update->pending_test_pattern) {
2336 		overall_type = UPDATE_TYPE_FULL;
2337 	}
2338 
2339 	/* some stream updates require passive update */
2340 	if (stream_update) {
2341 		union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2342 
2343 		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2344 			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2345 			stream_update->integer_scaling_update)
2346 			su_flags->bits.scaling = 1;
2347 
2348 		if (stream_update->out_transfer_func)
2349 			su_flags->bits.out_tf = 1;
2350 
2351 		if (stream_update->abm_level)
2352 			su_flags->bits.abm_level = 1;
2353 
2354 		if (stream_update->dpms_off)
2355 			su_flags->bits.dpms_off = 1;
2356 
2357 		if (stream_update->gamut_remap)
2358 			su_flags->bits.gamut_remap = 1;
2359 
2360 		if (stream_update->wb_update)
2361 			su_flags->bits.wb_update = 1;
2362 
2363 		if (stream_update->dsc_config)
2364 			su_flags->bits.dsc_changed = 1;
2365 
2366 #if defined(CONFIG_DRM_AMD_DC_DCN)
2367 		if (stream_update->mst_bw_update)
2368 			su_flags->bits.mst_bw = 1;
2369 #endif
2370 
2371 		if (su_flags->raw != 0)
2372 			overall_type = UPDATE_TYPE_FULL;
2373 
2374 		if (stream_update->output_csc_transform || stream_update->output_color_space)
2375 			su_flags->bits.out_csc = 1;
2376 	}
2377 
2378 	for (i = 0 ; i < surface_count; i++) {
2379 		enum surface_update_type type =
2380 				det_surface_update(dc, &updates[i]);
2381 
2382 		elevate_update_type(&overall_type, type);
2383 	}
2384 
2385 	return overall_type;
2386 }
2387 
2388 /*
2389  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2390  *
2391  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2392  */
2393 enum surface_update_type dc_check_update_surfaces_for_stream(
2394 		struct dc *dc,
2395 		struct dc_surface_update *updates,
2396 		int surface_count,
2397 		struct dc_stream_update *stream_update,
2398 		const struct dc_stream_status *stream_status)
2399 {
2400 	int i;
2401 	enum surface_update_type type;
2402 
2403 	if (stream_update)
2404 		stream_update->stream->update_flags.raw = 0;
2405 	for (i = 0; i < surface_count; i++)
2406 		updates[i].surface->update_flags.raw = 0;
2407 
2408 	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2409 	if (type == UPDATE_TYPE_FULL) {
2410 		if (stream_update) {
2411 			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2412 			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2413 			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2414 		}
2415 		for (i = 0; i < surface_count; i++)
2416 			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2417 	}
2418 
2419 	if (type == UPDATE_TYPE_FAST) {
2420 		// If there's an available clock comparator, we use that.
2421 		if (dc->clk_mgr->funcs->are_clock_states_equal) {
2422 			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2423 				dc->optimized_required = true;
2424 		// Else we fall back to a mem compare.
2425 		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2426 			dc->optimized_required = true;
2427 		}
2428 
2429 		dc->optimized_required |= dc->wm_optimized_required;
2430 	}
2431 
2432 	return type;
2433 }
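/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * classifies a batch of surface updates before deciding how to program them.
 * srf_updates, surface_count and stream_update are assumed to be provided by
 * the DM; dc_stream_get_status() is used the same way as in
 * dc_commit_updates_for_stream() further down.
 *
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, srf_updates, surface_count,
 *						    stream_update,
 *						    dc_stream_get_status(stream));
 *
 *	if (type == UPDATE_TYPE_FULL) {
 *		// full update: build and validate a fresh dc_state first
 *	} else {
 *		// fast/med update: program against dc->current_state
 *	}
 */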
2434 
2435 static struct dc_stream_status *stream_get_status(
2436 	struct dc_state *ctx,
2437 	struct dc_stream_state *stream)
2438 {
2439 	uint8_t i;
2440 
2441 	for (i = 0; i < ctx->stream_count; i++) {
2442 		if (stream == ctx->streams[i]) {
2443 			return &ctx->stream_status[i];
2444 		}
2445 	}
2446 
2447 	return NULL;
2448 }
2449 
2450 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2451 
2452 static void copy_surface_update_to_plane(
2453 		struct dc_plane_state *surface,
2454 		struct dc_surface_update *srf_update)
2455 {
2456 	if (srf_update->flip_addr) {
2457 		surface->address = srf_update->flip_addr->address;
2458 		surface->flip_immediate =
2459 			srf_update->flip_addr->flip_immediate;
2460 		surface->time.time_elapsed_in_us[surface->time.index] =
2461 			srf_update->flip_addr->flip_timestamp_in_us -
2462 				surface->time.prev_update_time_in_us;
2463 		surface->time.prev_update_time_in_us =
2464 			srf_update->flip_addr->flip_timestamp_in_us;
2465 		surface->time.index++;
2466 		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2467 			surface->time.index = 0;
2468 
2469 		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2470 	}
2471 
2472 	if (srf_update->scaling_info) {
2473 		surface->scaling_quality =
2474 				srf_update->scaling_info->scaling_quality;
2475 		surface->dst_rect =
2476 				srf_update->scaling_info->dst_rect;
2477 		surface->src_rect =
2478 				srf_update->scaling_info->src_rect;
2479 		surface->clip_rect =
2480 				srf_update->scaling_info->clip_rect;
2481 	}
2482 
2483 	if (srf_update->plane_info) {
2484 		surface->color_space =
2485 				srf_update->plane_info->color_space;
2486 		surface->format =
2487 				srf_update->plane_info->format;
2488 		surface->plane_size =
2489 				srf_update->plane_info->plane_size;
2490 		surface->rotation =
2491 				srf_update->plane_info->rotation;
2492 		surface->horizontal_mirror =
2493 				srf_update->plane_info->horizontal_mirror;
2494 		surface->stereo_format =
2495 				srf_update->plane_info->stereo_format;
2496 		surface->tiling_info =
2497 				srf_update->plane_info->tiling_info;
2498 		surface->visible =
2499 				srf_update->plane_info->visible;
2500 		surface->per_pixel_alpha =
2501 				srf_update->plane_info->per_pixel_alpha;
2502 		surface->global_alpha =
2503 				srf_update->plane_info->global_alpha;
2504 		surface->global_alpha_value =
2505 				srf_update->plane_info->global_alpha_value;
2506 		surface->dcc =
2507 				srf_update->plane_info->dcc;
2508 		surface->layer_index =
2509 				srf_update->plane_info->layer_index;
2510 	}
2511 
2512 	if (srf_update->gamma &&
2513 			(surface->gamma_correction !=
2514 					srf_update->gamma)) {
2515 		memcpy(&surface->gamma_correction->entries,
2516 			&srf_update->gamma->entries,
2517 			sizeof(struct dc_gamma_entries));
2518 		surface->gamma_correction->is_identity =
2519 			srf_update->gamma->is_identity;
2520 		surface->gamma_correction->num_entries =
2521 			srf_update->gamma->num_entries;
2522 		surface->gamma_correction->type =
2523 			srf_update->gamma->type;
2524 	}
2525 
2526 	if (srf_update->in_transfer_func &&
2527 			(surface->in_transfer_func !=
2528 				srf_update->in_transfer_func)) {
2529 		surface->in_transfer_func->sdr_ref_white_level =
2530 			srf_update->in_transfer_func->sdr_ref_white_level;
2531 		surface->in_transfer_func->tf =
2532 			srf_update->in_transfer_func->tf;
2533 		surface->in_transfer_func->type =
2534 			srf_update->in_transfer_func->type;
2535 		memcpy(&surface->in_transfer_func->tf_pts,
2536 			&srf_update->in_transfer_func->tf_pts,
2537 			sizeof(struct dc_transfer_func_distributed_points));
2538 	}
2539 
2540 	if (srf_update->func_shaper &&
2541 			(surface->in_shaper_func !=
2542 			srf_update->func_shaper))
2543 		memcpy(surface->in_shaper_func, srf_update->func_shaper,
2544 		sizeof(*surface->in_shaper_func));
2545 
2546 	if (srf_update->lut3d_func &&
2547 			(surface->lut3d_func !=
2548 			srf_update->lut3d_func))
2549 		memcpy(surface->lut3d_func, srf_update->lut3d_func,
2550 		sizeof(*surface->lut3d_func));
2551 
2552 	if (srf_update->hdr_mult.value)
2553 		surface->hdr_mult =
2554 				srf_update->hdr_mult;
2555 
2556 	if (srf_update->blend_tf &&
2557 			(surface->blend_tf !=
2558 			srf_update->blend_tf))
2559 		memcpy(surface->blend_tf, srf_update->blend_tf,
2560 		sizeof(*surface->blend_tf));
2561 
2562 	if (srf_update->input_csc_color_matrix)
2563 		surface->input_csc_color_matrix =
2564 			*srf_update->input_csc_color_matrix;
2565 
2566 	if (srf_update->coeff_reduction_factor)
2567 		surface->coeff_reduction_factor =
2568 			*srf_update->coeff_reduction_factor;
2569 
2570 	if (srf_update->gamut_remap_matrix)
2571 		surface->gamut_remap_matrix =
2572 			*srf_update->gamut_remap_matrix;
2573 }
2574 
2575 static void copy_stream_update_to_stream(struct dc *dc,
2576 					 struct dc_state *context,
2577 					 struct dc_stream_state *stream,
2578 					 struct dc_stream_update *update)
2579 {
2580 	struct dc_context *dc_ctx = dc->ctx;
2581 
2582 	if (update == NULL || stream == NULL)
2583 		return;
2584 
2585 	if (update->src.height && update->src.width)
2586 		stream->src = update->src;
2587 
2588 	if (update->dst.height && update->dst.width)
2589 		stream->dst = update->dst;
2590 
2591 	if (update->out_transfer_func &&
2592 	    stream->out_transfer_func != update->out_transfer_func) {
2593 		stream->out_transfer_func->sdr_ref_white_level =
2594 			update->out_transfer_func->sdr_ref_white_level;
2595 		stream->out_transfer_func->tf = update->out_transfer_func->tf;
2596 		stream->out_transfer_func->type =
2597 			update->out_transfer_func->type;
2598 		memcpy(&stream->out_transfer_func->tf_pts,
2599 		       &update->out_transfer_func->tf_pts,
2600 		       sizeof(struct dc_transfer_func_distributed_points));
2601 	}
2602 
2603 	if (update->hdr_static_metadata)
2604 		stream->hdr_static_metadata = *update->hdr_static_metadata;
2605 
2606 	if (update->abm_level)
2607 		stream->abm_level = *update->abm_level;
2608 
2609 	if (update->periodic_interrupt0)
2610 		stream->periodic_interrupt0 = *update->periodic_interrupt0;
2611 
2612 	if (update->periodic_interrupt1)
2613 		stream->periodic_interrupt1 = *update->periodic_interrupt1;
2614 
2615 	if (update->gamut_remap)
2616 		stream->gamut_remap_matrix = *update->gamut_remap;
2617 
2618 	/* Note: this being updated after mode set is currently not a use case;
2619 	 * however, if it arises, OCSC would need to be reprogrammed at the
2620 	 * minimum
2621 	 */
2622 	if (update->output_color_space)
2623 		stream->output_color_space = *update->output_color_space;
2624 
2625 	if (update->output_csc_transform)
2626 		stream->csc_color_matrix = *update->output_csc_transform;
2627 
2628 	if (update->vrr_infopacket)
2629 		stream->vrr_infopacket = *update->vrr_infopacket;
2630 
2631 	if (update->dpms_off)
2632 		stream->dpms_off = *update->dpms_off;
2633 
2634 	if (update->vsc_infopacket)
2635 		stream->vsc_infopacket = *update->vsc_infopacket;
2636 
2637 	if (update->vsp_infopacket)
2638 		stream->vsp_infopacket = *update->vsp_infopacket;
2639 
2640 	if (update->dither_option)
2641 		stream->dither_option = *update->dither_option;
2642 
2643 	if (update->pending_test_pattern)
2644 		stream->test_pattern = *update->pending_test_pattern;
2645 	/* update current stream with writeback info */
2646 	if (update->wb_update) {
2647 		int i;
2648 
2649 		stream->num_wb_info = update->wb_update->num_wb_info;
2650 		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2651 		for (i = 0; i < stream->num_wb_info; i++)
2652 			stream->writeback_info[i] =
2653 				update->wb_update->writeback_info[i];
2654 	}
2655 	if (update->dsc_config) {
2656 		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2657 		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2658 		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2659 				       update->dsc_config->num_slices_v != 0);
2660 
2661 		/* Use temporary context for validating new DSC config */
2662 		struct dc_state *dsc_validate_context = dc_create_state(dc);
2663 
2664 		if (dsc_validate_context) {
2665 			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2666 
2667 			stream->timing.dsc_cfg = *update->dsc_config;
2668 			stream->timing.flags.DSC = enable_dsc;
2669 			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2670 				stream->timing.dsc_cfg = old_dsc_cfg;
2671 				stream->timing.flags.DSC = old_dsc_enabled;
2672 				update->dsc_config = NULL;
2673 			}
2674 
2675 			dc_release_state(dsc_validate_context);
2676 		} else {
2677 			DC_ERROR("Failed to allocate new validate context for DSC change\n");
2678 			update->dsc_config = NULL;
2679 		}
2680 	}
2681 }
2682 
2683 static void commit_planes_do_stream_update(struct dc *dc,
2684 		struct dc_stream_state *stream,
2685 		struct dc_stream_update *stream_update,
2686 		enum surface_update_type update_type,
2687 		struct dc_state *context)
2688 {
2689 	int j;
2690 
2691 	// Stream updates
2692 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
2693 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2694 
2695 		if (!pipe_ctx->top_pipe &&  !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2696 
2697 			if (stream_update->periodic_interrupt0 &&
2698 					dc->hwss.setup_periodic_interrupt)
2699 				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2700 
2701 			if (stream_update->periodic_interrupt1 &&
2702 					dc->hwss.setup_periodic_interrupt)
2703 				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2704 
2705 			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2706 					stream_update->vrr_infopacket ||
2707 					stream_update->vsc_infopacket ||
2708 					stream_update->vsp_infopacket) {
2709 				resource_build_info_frame(pipe_ctx);
2710 				dc->hwss.update_info_frame(pipe_ctx);
2711 			}
2712 
2713 			if (stream_update->hdr_static_metadata &&
2714 					stream->use_dynamic_meta &&
2715 					dc->hwss.set_dmdata_attributes &&
2716 					pipe_ctx->stream->dmdata_address.quad_part != 0)
2717 				dc->hwss.set_dmdata_attributes(pipe_ctx);
2718 
2719 			if (stream_update->gamut_remap)
2720 				dc_stream_set_gamut_remap(dc, stream);
2721 
2722 			if (stream_update->output_csc_transform)
2723 				dc_stream_program_csc_matrix(dc, stream);
2724 
2725 			if (stream_update->dither_option) {
2726 				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2727 				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2728 									&pipe_ctx->stream->bit_depth_params);
2729 				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2730 						&stream->bit_depth_params,
2731 						&stream->clamping);
2732 				while (odm_pipe) {
2733 					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2734 							&stream->bit_depth_params,
2735 							&stream->clamping);
2736 					odm_pipe = odm_pipe->next_odm_pipe;
2737 				}
2738 			}
2739 
2740 
2741 			/* Full fe update*/
2742 			if (update_type == UPDATE_TYPE_FAST)
2743 				continue;
2744 
2745 			if (stream_update->dsc_config)
2746 				dp_update_dsc_config(pipe_ctx);
2747 
2748 #if defined(CONFIG_DRM_AMD_DC_DCN)
2749 			if (stream_update->mst_bw_update) {
2750 				if (stream_update->mst_bw_update->is_increase)
2751 					dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2752 				else
2753 					dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2754 			}
2755 #endif
2756 
2757 			if (stream_update->pending_test_pattern) {
2758 				dc_link_dp_set_test_pattern(stream->link,
2759 					stream->test_pattern.type,
2760 					stream->test_pattern.color_space,
2761 					stream->test_pattern.p_link_settings,
2762 					stream->test_pattern.p_custom_pattern,
2763 					stream->test_pattern.cust_pattern_size);
2764 			}
2765 
2766 			if (stream_update->dpms_off) {
2767 				if (*stream_update->dpms_off) {
2768 					core_link_disable_stream(pipe_ctx);
2769 					/* for dpms, keep acquired resources*/
2770 					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2771 						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2772 
2773 					dc->optimized_required = true;
2774 
2775 				} else {
2776 					if (get_seamless_boot_stream_count(context) == 0)
2777 						dc->hwss.prepare_bandwidth(dc, dc->current_state);
2778 
2779 					core_link_enable_stream(dc->current_state, pipe_ctx);
2780 				}
2781 			}
2782 
2783 			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2784 				bool should_program_abm = true;
2785 
2786 				// if otg funcs are defined, check if blanked before programming
2787 				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2788 					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2789 						should_program_abm = false;
2790 
2791 				if (should_program_abm) {
2792 					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2793 						dc->hwss.set_abm_immediate_disable(pipe_ctx);
2794 					} else {
2795 						pipe_ctx->stream_res.abm->funcs->set_abm_level(
2796 							pipe_ctx->stream_res.abm, stream->abm_level);
2797 					}
2798 				}
2799 			}
2800 		}
2801 	}
2802 }
2803 
2804 static void commit_planes_for_stream(struct dc *dc,
2805 		struct dc_surface_update *srf_updates,
2806 		int surface_count,
2807 		struct dc_stream_state *stream,
2808 		struct dc_stream_update *stream_update,
2809 		enum surface_update_type update_type,
2810 		struct dc_state *context)
2811 {
2812 	int i, j;
2813 	struct pipe_ctx *top_pipe_to_program = NULL;
2814 	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
2815 
2816 #if defined(CONFIG_DRM_AMD_DC_DCN)
2817 	dc_z10_restore(dc);
2818 #endif
2819 
2820 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
2821 		/* Optimize seamless boot flag keeps clocks and watermarks high until
2822 		 * first flip. After first flip, optimization is required to lower
2823 		 * bandwidth. Note that UEFI is expected to light up only a single
2824 		 * display on POST, so we only expect one stream with the seamless
2825 		 * boot flag set.
2826 		 */
2827 		if (stream->apply_seamless_boot_optimization) {
2828 			stream->apply_seamless_boot_optimization = false;
2829 
2830 			if (get_seamless_boot_stream_count(context) == 0)
2831 				dc->optimized_required = true;
2832 		}
2833 	}
2834 
2835 	if (update_type == UPDATE_TYPE_FULL) {
2836 #if defined(CONFIG_DRM_AMD_DC_DCN)
2837 		dc_allow_idle_optimizations(dc, false);
2838 
2839 #endif
2840 		if (get_seamless_boot_stream_count(context) == 0)
2841 			dc->hwss.prepare_bandwidth(dc, context);
2842 
2843 		context_clock_trace(dc, context);
2844 	}
2845 
2846 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
2847 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2848 
2849 		if (!pipe_ctx->top_pipe &&
2850 			!pipe_ctx->prev_odm_pipe &&
2851 			pipe_ctx->stream &&
2852 			pipe_ctx->stream == stream) {
2853 			top_pipe_to_program = pipe_ctx;
2854 		}
2855 	}
2856 
2857 #ifdef CONFIG_DRM_AMD_DC_DCN
2858 	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2859 		struct pipe_ctx *mpcc_pipe;
2860 		struct pipe_ctx *odm_pipe;
2861 
2862 		for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2863 			for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2864 				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2865 	}
2866 #endif
2867 
2868 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2869 		if (top_pipe_to_program &&
2870 			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2871 			if (should_use_dmub_lock(stream->link)) {
2872 				union dmub_hw_lock_flags hw_locks = { 0 };
2873 				struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2874 
2875 				hw_locks.bits.lock_dig = 1;
2876 				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2877 
2878 				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2879 							true,
2880 							&hw_locks,
2881 							&inst_flags);
2882 			} else
2883 				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2884 						top_pipe_to_program->stream_res.tg);
2885 		}
2886 
2887 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2888 		dc->hwss.interdependent_update_lock(dc, context, true);
2889 	else
2890 		/* Lock the top pipe while updating plane addrs, since freesync requires
2891 		 *  plane addr update event triggers to be synchronized.
2892 		 *  top_pipe_to_program is expected to never be NULL
2893 		 */
2894 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2895 
2896 	// Stream updates
2897 	if (stream_update)
2898 		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2899 
2900 	if (surface_count == 0) {
2901 		/*
2902 		 * In case of turning off the screen, there is no need to program the
2903 		 * front end a second time; just return after programming blank.
2904 		 */
2905 		if (dc->hwss.apply_ctx_for_surface)
2906 			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2907 		if (dc->hwss.program_front_end_for_ctx)
2908 			dc->hwss.program_front_end_for_ctx(dc, context);
2909 
2910 		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2911 			dc->hwss.interdependent_update_lock(dc, context, false);
2912 		else
2913 			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2914 		dc->hwss.post_unlock_program_front_end(dc, context);
2915 		return;
2916 	}
2917 
2918 	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2919 		for (i = 0; i < surface_count; i++) {
2920 			struct dc_plane_state *plane_state = srf_updates[i].surface;
2921 			/*set logical flag for lock/unlock use*/
2922 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
2923 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2924 				if (!pipe_ctx->plane_state)
2925 					continue;
2926 				if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
2927 					continue;
2928 				pipe_ctx->plane_state->triplebuffer_flips = false;
2929 				if (update_type == UPDATE_TYPE_FAST &&
2930 					dc->hwss.program_triplebuffer != NULL &&
2931 					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
2932 						/*triple buffer for VUpdate  only*/
2933 						pipe_ctx->plane_state->triplebuffer_flips = true;
2934 				}
2935 			}
2936 			if (update_type == UPDATE_TYPE_FULL) {
2937 				/* force vsync flip when reconfiguring pipes to prevent underflow */
2938 				plane_state->flip_immediate = false;
2939 			}
2940 		}
2941 	}
2942 
2943 	// Update Type FULL, Surface updates
2944 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
2945 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2946 
2947 		if (!pipe_ctx->top_pipe &&
2948 			!pipe_ctx->prev_odm_pipe &&
2949 			should_update_pipe_for_stream(context, pipe_ctx, stream)) {
2950 			struct dc_stream_status *stream_status = NULL;
2951 
2952 			if (!pipe_ctx->plane_state)
2953 				continue;
2954 
2955 			/* Full fe update*/
2956 			if (update_type == UPDATE_TYPE_FAST)
2957 				continue;
2958 
2959 			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2960 
2961 			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2962 				/*turn off triple buffer for full update*/
2963 				dc->hwss.program_triplebuffer(
2964 					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2965 			}
2966 			stream_status =
2967 				stream_get_status(context, pipe_ctx->stream);
2968 
2969 			if (dc->hwss.apply_ctx_for_surface)
2970 				dc->hwss.apply_ctx_for_surface(
2971 					dc, pipe_ctx->stream, stream_status->plane_count, context);
2972 		}
2973 	}
2974 	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2975 		dc->hwss.program_front_end_for_ctx(dc, context);
2976 #ifdef CONFIG_DRM_AMD_DC_DCN
2977 		if (dc->debug.validate_dml_output) {
2978 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
2979 				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
2980 				if (cur_pipe->stream == NULL)
2981 					continue;
2982 
2983 				cur_pipe->plane_res.hubp->funcs->validate_dml_output(
2984 						cur_pipe->plane_res.hubp, dc->ctx,
2985 						&context->res_ctx.pipe_ctx[i].rq_regs,
2986 						&context->res_ctx.pipe_ctx[i].dlg_regs,
2987 						&context->res_ctx.pipe_ctx[i].ttu_regs);
2988 			}
2989 		}
2990 #endif
2991 	}
2992 
2993 	// Update Type FAST, Surface updates
2994 	if (update_type == UPDATE_TYPE_FAST) {
2995 		if (dc->hwss.set_flip_control_gsl)
2996 			for (i = 0; i < surface_count; i++) {
2997 				struct dc_plane_state *plane_state = srf_updates[i].surface;
2998 
2999 				for (j = 0; j < dc->res_pool->pipe_count; j++) {
3000 					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3001 
3002 					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3003 						continue;
3004 
3005 					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3006 						continue;
3007 
3008 					// GSL has to be used for flip immediate
3009 					dc->hwss.set_flip_control_gsl(pipe_ctx,
3010 							pipe_ctx->plane_state->flip_immediate);
3011 				}
3012 			}
3013 
3014 		/* Perform requested Updates */
3015 		for (i = 0; i < surface_count; i++) {
3016 			struct dc_plane_state *plane_state = srf_updates[i].surface;
3017 
3018 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
3019 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3020 
3021 				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3022 					continue;
3023 
3024 				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3025 					continue;
3026 
3027 				/*program triple buffer after lock based on flip type*/
3028 				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3029 					/*only enable triplebuffer for  fast_update*/
3030 					dc->hwss.program_triplebuffer(
3031 						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3032 				}
3033 				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3034 					dc->hwss.update_plane_addr(dc, pipe_ctx);
3035 			}
3036 		}
3037 
3038 	}
3039 
3040 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
3041 		dc->hwss.interdependent_update_lock(dc, context, false);
3042 	else
3043 		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3044 
3045 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3046 		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3047 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3048 					top_pipe_to_program->stream_res.tg,
3049 					CRTC_STATE_VACTIVE);
3050 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3051 					top_pipe_to_program->stream_res.tg,
3052 					CRTC_STATE_VBLANK);
3053 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3054 					top_pipe_to_program->stream_res.tg,
3055 					CRTC_STATE_VACTIVE);
3056 
3057 			if (stream && should_use_dmub_lock(stream->link)) {
3058 				union dmub_hw_lock_flags hw_locks = { 0 };
3059 				struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3060 
3061 				hw_locks.bits.lock_dig = 1;
3062 				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3063 
3064 				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3065 							false,
3066 							&hw_locks,
3067 							&inst_flags);
3068 			} else
3069 				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3070 					top_pipe_to_program->stream_res.tg);
3071 		}
3072 
3073 	if (update_type != UPDATE_TYPE_FAST)
3074 		dc->hwss.post_unlock_program_front_end(dc, context);
3075 
3076 	// Fire manual trigger only when bottom plane is flipped
3077 	for (j = 0; j < dc->res_pool->pipe_count; j++) {
3078 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3079 
3080 		if (!pipe_ctx->plane_state)
3081 			continue;
3082 
3083 		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3084 				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3085 				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
3086 				pipe_ctx->plane_state->skip_manual_trigger)
3087 			continue;
3088 
3089 		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3090 			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3091 	}
3092 }
3093 
3094 void dc_commit_updates_for_stream(struct dc *dc,
3095 		struct dc_surface_update *srf_updates,
3096 		int surface_count,
3097 		struct dc_stream_state *stream,
3098 		struct dc_stream_update *stream_update,
3099 		struct dc_state *state)
3100 {
3101 	const struct dc_stream_status *stream_status;
3102 	enum surface_update_type update_type;
3103 	struct dc_state *context;
3104 	struct dc_context *dc_ctx = dc->ctx;
3105 	int i, j;
3106 
3107 	stream_status = dc_stream_get_status(stream);
3108 	context = dc->current_state;
3109 
3110 	update_type = dc_check_update_surfaces_for_stream(
3111 				dc, srf_updates, surface_count, stream_update, stream_status);
3112 
3113 	if (update_type >= update_surface_trace_level)
3114 		update_surface_trace(dc, srf_updates, surface_count);
3115 
3116 
3117 	if (update_type >= UPDATE_TYPE_FULL) {
3118 
3119 		/* initialize scratch memory for building context */
3120 		context = dc_create_state(dc);
3121 		if (context == NULL) {
3122 			DC_ERROR("Failed to allocate new validate context!\n");
3123 			return;
3124 		}
3125 
3126 		dc_resource_state_copy_construct(state, context);
3127 
3128 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3129 			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3130 			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3131 
3132 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3133 				new_pipe->plane_state->force_full_update = true;
3134 		}
3135 	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3136 		/*
3137 		 * Previous frame finished and HW is ready for optimization.
3138 		 *
3139 		 * Only relevant for DCN behavior where we can guarantee the optimization
3140 		 * is safe to apply - retain the legacy behavior for DCE.
3141 		 */
3142 		dc_post_update_surfaces_to_stream(dc);
3143 	}
3144 
3145 
3146 	for (i = 0; i < surface_count; i++) {
3147 		struct dc_plane_state *surface = srf_updates[i].surface;
3148 
3149 		copy_surface_update_to_plane(surface, &srf_updates[i]);
3150 
3151 		if (update_type >= UPDATE_TYPE_MED) {
3152 			for (j = 0; j < dc->res_pool->pipe_count; j++) {
3153 				struct pipe_ctx *pipe_ctx =
3154 					&context->res_ctx.pipe_ctx[j];
3155 
3156 				if (pipe_ctx->plane_state != surface)
3157 					continue;
3158 
3159 				resource_build_scaling_params(pipe_ctx);
3160 			}
3161 		}
3162 	}
3163 
3164 	copy_stream_update_to_stream(dc, context, stream, stream_update);
3165 
3166 	if (update_type >= UPDATE_TYPE_FULL) {
3167 		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3168 			DC_ERROR("Mode validation failed for stream update!\n");
3169 			dc_release_state(context);
3170 			return;
3171 		}
3172 	}
3173 
3174 	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3175 
3176 	commit_planes_for_stream(
3177 				dc,
3178 				srf_updates,
3179 				surface_count,
3180 				stream,
3181 				stream_update,
3182 				update_type,
3183 				context);
3184 	/*update current_State*/
3185 	/* update current_state */
3186 
3187 		struct dc_state *old = dc->current_state;
3188 
3189 		dc->current_state = context;
3190 		dc_release_state(old);
3191 
3192 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
3193 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3194 
3195 			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3196 				pipe_ctx->plane_state->force_full_update = false;
3197 		}
3198 	}
3199 
3200 	/* Legacy optimization path for DCE. */
3201 	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3202 		dc_post_update_surfaces_to_stream(dc);
3203 		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3204 	}
3205 
3208 }
3209 
3210 uint8_t dc_get_current_stream_count(struct dc *dc)
3211 {
3212 	return dc->current_state->stream_count;
3213 }
3214 
3215 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3216 {
3217 	if (i < dc->current_state->stream_count)
3218 		return dc->current_state->streams[i];
3219 	return NULL;
3220 }
3221 
3222 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3223 {
3224 	uint8_t i;
3225 	struct dc_context *ctx = link->ctx;
3226 
3227 	for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3228 		if (ctx->dc->current_state->streams[i]->link == link)
3229 			return ctx->dc->current_state->streams[i];
3230 	}
3231 
3232 	return NULL;
3233 }
3234 
3235 enum dc_irq_source dc_interrupt_to_irq_source(
3236 		struct dc *dc,
3237 		uint32_t src_id,
3238 		uint32_t ext_id)
3239 {
3240 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3241 }
3242 
3243 /*
3244  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3245  */
3246 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3247 {
3248 
3249 	if (dc == NULL)
3250 		return false;
3251 
3252 	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3253 }
3254 
3255 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3256 {
3257 	dal_irq_service_ack(dc->res_pool->irqs, src);
3258 }
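/*
 * Illustrative sketch, not part of the original file: hypothetical IRQ glue
 * that maps a raw src_id/ext_id pair to a dc_irq_source, acks it, and keeps
 * the source enabled, using only the three helpers above.
 *
 *	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	dc_interrupt_ack(dc, src);
 *	dc_interrupt_set(dc, src, true);
 */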
3259 
3260 void dc_power_down_on_boot(struct dc *dc)
3261 {
3262 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3263 			dc->hwss.power_down_on_boot)
3264 		dc->hwss.power_down_on_boot(dc);
3265 }
3266 
3267 void dc_set_power_state(
3268 	struct dc *dc,
3269 	enum dc_acpi_cm_power_state power_state)
3270 {
3271 	struct kref refcount;
3272 	struct display_mode_lib *dml;
3273 
3274 	if (!dc->current_state)
3275 		return;
3276 
3277 	switch (power_state) {
3278 	case DC_ACPI_CM_POWER_STATE_D0:
3279 		dc_resource_state_construct(dc, dc->current_state);
3280 
3281 #if defined(CONFIG_DRM_AMD_DC_DCN)
3282 		dc_z10_restore(dc);
3283 #endif
3284 		if (dc->ctx->dmub_srv)
3285 			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3286 
3287 		dc->hwss.init_hw(dc);
3288 
3289 		if (dc->hwss.init_sys_ctx != NULL &&
3290 			dc->vm_pa_config.valid) {
3291 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3292 		}
3293 
3294 		break;
3295 	default:
3296 		ASSERT(dc->current_state->stream_count == 0);
3297 		/* Zero out the current context so that on resume we start with
3298 		 * clean state, and dc hw programming optimizations will not
3299 		 * cause any trouble.
3300 		 */
3301 		dml = kzalloc(sizeof(struct display_mode_lib),
3302 				GFP_KERNEL);
3303 
3304 		ASSERT(dml);
3305 		if (!dml)
3306 			return;
3307 
3308 		/* Preserve refcount */
3309 		refcount = dc->current_state->refcount;
3310 		/* Preserve display mode lib */
3311 		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3312 
3313 		dc_resource_state_destruct(dc->current_state);
3314 		memset(dc->current_state, 0,
3315 				sizeof(*dc->current_state));
3316 
3317 		dc->current_state->refcount = refcount;
3318 		dc->current_state->bw_ctx.dml = *dml;
3319 
3320 		kfree(dml);
3321 
3322 		break;
3323 	}
3324 }
3325 
3326 void dc_resume(struct dc *dc)
3327 {
3328 	uint32_t i;
3329 
3330 	for (i = 0; i < dc->link_count; i++)
3331 		core_link_resume(dc->links[i]);
3332 }
3333 
3334 bool dc_is_dmcu_initialized(struct dc *dc)
3335 {
3336 	struct dmcu *dmcu = dc->res_pool->dmcu;
3337 
3338 	if (dmcu)
3339 		return dmcu->funcs->is_dmcu_initialized(dmcu);
3340 	return false;
3341 }
3342 
3343 bool dc_submit_i2c(
3344 		struct dc *dc,
3345 		uint32_t link_index,
3346 		struct i2c_command *cmd)
3347 {
3348 
3349 	struct dc_link *link = dc->links[link_index];
3350 	struct ddc_service *ddc = link->ddc;
3351 	return dce_i2c_submit_command(
3352 		dc->res_pool,
3353 		ddc->ddc_pin,
3354 		cmd);
3355 }
3356 
3357 bool dc_submit_i2c_oem(
3358 		struct dc *dc,
3359 		struct i2c_command *cmd)
3360 {
3361 	struct ddc_service *ddc = dc->res_pool->oem_device;
3362 	return dce_i2c_submit_command(
3363 		dc->res_pool,
3364 		ddc->ddc_pin,
3365 		cmd);
3366 }
3367 
3368 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3369 {
3370 	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3371 		BREAK_TO_DEBUGGER();
3372 		return false;
3373 	}
3374 
3375 	dc_sink_retain(sink);
3376 
3377 	dc_link->remote_sinks[dc_link->sink_count] = sink;
3378 	dc_link->sink_count++;
3379 
3380 	return true;
3381 }
3382 
3383 /*
3384  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3385  *
3386  * EDID length is in bytes
3387  */
3388 struct dc_sink *dc_link_add_remote_sink(
3389 		struct dc_link *link,
3390 		const uint8_t *edid,
3391 		int len,
3392 		struct dc_sink_init_data *init_data)
3393 {
3394 	struct dc_sink *dc_sink;
3395 	enum dc_edid_status edid_status;
3396 
3397 	if (len > DC_MAX_EDID_BUFFER_SIZE) {
3398 		dm_error("Max EDID buffer size breached!\n");
3399 		return NULL;
3400 	}
3401 
3402 	if (!init_data) {
3403 		BREAK_TO_DEBUGGER();
3404 		return NULL;
3405 	}
3406 
3407 	if (!init_data->link) {
3408 		BREAK_TO_DEBUGGER();
3409 		return NULL;
3410 	}
3411 
3412 	dc_sink = dc_sink_create(init_data);
3413 
3414 	if (!dc_sink)
3415 		return NULL;
3416 
3417 	memmove(dc_sink->dc_edid.raw_edid, edid, len);
3418 	dc_sink->dc_edid.length = len;
3419 
3420 	if (!link_add_remote_sink_helper(
3421 			link,
3422 			dc_sink))
3423 		goto fail_add_sink;
3424 
3425 	edid_status = dm_helpers_parse_edid_caps(
3426 			link,
3427 			&dc_sink->dc_edid,
3428 			&dc_sink->edid_caps);
3429 
3430 	/*
3431 	 * Treat device as no EDID device if EDID
3432 	 * parsing fails
3433 	 */
3434 	if (edid_status != EDID_OK) {
3435 		dc_sink->dc_edid.length = 0;
3436 		dm_error("Bad EDID, status%d!\n", edid_status);
3437 	}
3438 
3439 	return dc_sink;
3440 
3441 fail_add_sink:
3442 	dc_sink_release(dc_sink);
3443 	return NULL;
3444 }
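/*
 * Illustrative sketch, not part of the original file: a hypothetical MST
 * discovery path attaches a remote sink from a freshly read EDID. Fields of
 * dc_sink_init_data other than .link are elided here.
 *
 *	struct dc_sink_init_data init_data = { .link = link };
 *	struct dc_sink *sink;
 *
 *	sink = dc_link_add_remote_sink(link, edid_buf, edid_len, &init_data);
 *	if (!sink)
 *		return;	// EDID too large, bad init data, or allocation failure
 */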
3445 
3446 /*
3447  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3448  *
3449  * Note that this just removes the struct dc_sink - it doesn't
3450  * program hardware or alter other members of dc_link
3451  */
3452 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3453 {
3454 	int i;
3455 
3456 	if (!link->sink_count) {
3457 		BREAK_TO_DEBUGGER();
3458 		return;
3459 	}
3460 
3461 	for (i = 0; i < link->sink_count; i++) {
3462 		if (link->remote_sinks[i] == sink) {
3463 			dc_sink_release(sink);
3464 			link->remote_sinks[i] = NULL;
3465 
3466 			/* shrink array to remove the empty slot */
3467 			while (i < link->sink_count - 1) {
3468 				link->remote_sinks[i] = link->remote_sinks[i+1];
3469 				i++;
3470 			}
3471 			link->remote_sinks[i] = NULL;
3472 			link->sink_count--;
3473 			return;
3474 		}
3475 	}
3476 }
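/*
 * Illustrative counterpart to the sketch above (hypothetical, not part of
 * the original file): on hot-unplug the same sink is detached again. Since
 * dc_link_remove_remote_sink() only drops the link's reference, a reference
 * the caller still holds from dc_link_add_remote_sink() is released
 * separately.
 *
 *	dc_link_remove_remote_sink(link, sink);
 *	dc_sink_release(sink);
 */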
3477 
3478 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3479 {
3480 	info->displayClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3481 	info->engineClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3482 	info->memoryClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3483 	info->maxSupportedDppClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3484 	info->dppClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3485 	info->socClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3486 	info->dcfClockDeepSleep			= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3487 	info->fClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3488 	info->phyClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3489 }
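
/**
 * dc_set_clock - Program a clock through the hw sequencer, when implemented
 * @dc: dc structure
 * @clock_type: clock to program
 * @clk_khz: requested clock frequency in kHz
 * @stepping: stepping value passed through to the hw sequencer
 *
 * Returns: the hw sequencer result, or DC_ERROR_UNEXPECTED if set_clock is
 * not implemented for this ASIC.
 */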
3490 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3491 {
3492 	if (dc->hwss.set_clock)
3493 		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3494 	return DC_ERROR_UNEXPECTED;
3495 }
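
/**
 * dc_get_clock - Query a clock configuration through the hw sequencer, when implemented
 * @dc: dc structure
 * @clock_type: clock to query
 * @clock_cfg: output clock configuration; left untouched if get_clock is not
 * implemented for this ASIC
 */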
3496 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3497 {
3498 	if (dc->hwss.get_clock)
3499 		dc->hwss.get_clock(dc, clock_type, clock_cfg);
3500 }
3501 
3502 /* Enable/disable eDP PSR without specifying a stream for eDP */
3503 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3504 {
3505 	int i;
3506 	bool allow_active;
3507 
3508 	for (i = 0; i < dc->current_state->stream_count ; i++) {
3509 		struct dc_link *link;
3510 		struct dc_stream_state *stream = dc->current_state->streams[i];
3511 
3512 		link = stream->link;
3513 		if (!link)
3514 			continue;
3515 
3516 		if (link->psr_settings.psr_feature_enabled) {
3517 			if (enable && !link->psr_settings.psr_allow_active) {
3518 				allow_active = true;
3519 				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
3520 					return false;
3521 			} else if (!enable && link->psr_settings.psr_allow_active) {
3522 				allow_active = false;
3523 				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
3524 					return false;
3525 			}
3526 		}
3527 	}
3528 
3529 	return true;
3530 }
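
/*
 * Example (illustrative sketch only, not taken from an existing caller): a DM
 * that needs PSR kept inactive on all eDP links around a latency-sensitive
 * operation could bracket it as below.
 *
 *	if (dc_set_psr_allow_active(dc, false)) {
 *		... do work that must not race with PSR entry ...
 *		dc_set_psr_allow_active(dc, true);
 *	}
 */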
3531 
3532 #if defined(CONFIG_DRM_AMD_DC_DCN)
3533 
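/**
 * dc_allow_idle_optimizations - Allow or disallow idle power optimizations
 * @dc: dc structure
 * @allow: true to allow idle optimizations, false to disallow them
 *
 * Does nothing when idle power optimizations are disabled by debug option,
 * when the SMU is not present, or when @allow already matches the current
 * state; otherwise the request is forwarded to the hw sequencer and the
 * cached state is updated on success.
 */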
3534 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3535 {
3536 	if (dc->debug.disable_idle_power_optimizations)
3537 		return;
3538 
3539 	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
3540 		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3541 			return;
3542 
3543 	if (allow == dc->idle_optimizations_allowed)
3544 		return;
3545 
3546 	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3547 		dc->idle_optimizations_allowed = allow;
3548 }
3549 
3550 /*
3551  * blank all streams, and set min and max memory clock to
3552  * lowest and highest DPM level, respectively
3553  */
3554 void dc_unlock_memory_clock_frequency(struct dc *dc)
3555 {
3556 	unsigned int i;
3557 
3558 	for (i = 0; i < MAX_PIPES; i++)
3559 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3560 			core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3561 
3562 	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3563 	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3564 }
3565 
3566 /*
3567  * set min memory clock to the min required for current mode,
3568  * max to maxDPM, and unblank streams
3569  */
3570 void dc_lock_memory_clock_frequency(struct dc *dc)
3571 {
3572 	unsigned int i;
3573 
3574 	dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3575 	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3576 	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3577 
3578 	for (i = 0; i < MAX_PIPES; i++)
3579 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3580 			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3581 }
3582 
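/*
 * Blank pixel data on every pipe with an active stream, wait for the blank to
 * take effect across a VACTIVE/VBLANK/VACTIVE transition, force both the
 * minimum and maximum memory clock to memclk_mhz, then unblank the pipes.
 */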
3583 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
3584 {
3585 	struct dc_state *context = dc->current_state;
3586 	struct hubp *hubp;
3587 	struct pipe_ctx *pipe;
3588 	int i;
3589 
3590 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3591 		pipe = &context->res_ctx.pipe_ctx[i];
3592 
3593 		if (pipe->stream != NULL) {
3594 			dc->hwss.disable_pixel_data(dc, pipe, true);
3595 
3596 			// wait for double buffer
3597 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3598 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
3599 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3600 
3601 			hubp = pipe->plane_res.hubp;
3602 			hubp->funcs->set_blank_regs(hubp, true);
3603 		}
3604 	}
3605 
3606 	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
3607 	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
3608 
3609 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3610 		pipe = &context->res_ctx.pipe_ctx[i];
3611 
3612 		if (pipe->stream != NULL) {
3613 			dc->hwss.disable_pixel_data(dc, pipe, false);
3614 
3615 			hubp = pipe->plane_res.hubp;
3616 			hubp->funcs->set_blank_regs(hubp, false);
3617 		}
3618 	}
3619 }
3620 
3621 
3622 /**
3623  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
3624  * @dc: pointer to dc of the dm calling this
3625  * @enable: true = transition to DC mode, false = transition back to AC mode
3626  *
3627  * Some SoCs define additional clock limits when in DC (battery) mode; DM should
3628  * invoke this function when the platform undergoes a power source transition
3629  * so DC can apply/unapply the limit. This interface may be disruptive to
3630  * the onscreen content.
3631  *
3632  * Context: Triggered by OS through DM interface, or manually by escape calls.
3633  * Need to hold the dc_lock when doing so.
3634  *
3635  * Return: none (void function)
3636  *
3637  */
3638 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
3639 {
3640 	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
3641 	unsigned int softMax, maxDPM, funcMin;
3642 	bool p_state_change_support;
3643 
3644 	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
3645 		return;
3646 
3647 	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
3648 	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
3649 	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
3650 	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
3651 
3652 	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
3653 		if (p_state_change_support) {
3654 			if (funcMin <= softMax)
3655 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
3656 			// else: No-Op
3657 		} else {
3658 			if (funcMin <= softMax)
3659 				blank_and_force_memclk(dc, true, softMax);
3660 			// else: No-Op
3661 		}
3662 	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
3663 		if (p_state_change_support) {
3664 			if (funcMin <= softMax)
3665 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
3666 			// else: No-Op
3667 		} else {
3668 			if (funcMin <= softMax)
3669 				blank_and_force_memclk(dc, true, maxDPM);
3670 			// else: No-Op
3671 		}
3672 	}
3673 	dc->clk_mgr->dc_mode_softmax_enabled = enable;
3674 }
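
/**
 * dc_is_plane_eligible_for_idle_optimizations - Check whether a plane may keep
 * idle optimizations enabled
 * @dc: dc structure
 * @plane: plane state to check
 * @cursor_attr: cursor attributes included in the check
 *
 * Returns: true if the hw sequencer reports that the plane fits in MALL,
 * false otherwise (including when the check is not implemented).
 */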
3675 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3676 		struct dc_cursor_attributes *cursor_attr)
3677 {
3678 	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3679 		return true;
3680 	return false;
3681 }
3682 
3683 /* cleanup on driver unload */
3684 void dc_hardware_release(struct dc *dc)
3685 {
3686 	if (dc->hwss.hardware_release)
3687 		dc->hwss.hardware_release(dc);
3688 }
3689 #endif
3690 
3691 /**
3692  * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
3693  * @dc: dc structure
3694  *
3695  * Returns: True to enable dmub notifications, False otherwise
3696  */
3697 bool dc_enable_dmub_notifications(struct dc *dc)
3698 {
3699 #if defined(CONFIG_DRM_AMD_DC_DCN)
3700 	/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
3701 	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
3702 	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
3703 	    !dc->debug.dpia_debug.bits.disable_dpia)
3704 		return true;
3705 #endif
3706 	/* dmub aux needs dmub notifications to be enabled */
3707 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
3708 }
3709 
3710 /**
3711  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
3712  * @dc: dc structure
3713  * @link_index: link index
3714  * @payload: aux payload
3715  *
3716  * Sets the port index appropriately for legacy DDC.
3717  * Returns: True if successful, False if failure
3718  */
3719 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3720 				uint32_t link_index,
3721 				struct aux_payload *payload)
3722 {
3723 	uint8_t action;
3724 	union dmub_rb_cmd cmd = {0};
3725 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3726 
3727 	ASSERT(payload->length <= 16);
3728 
3729 	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3730 	cmd.dp_aux_access.header.payload_bytes = 0;
3731 	/* For dpia, ddc_pin is set to NULL */
3732 	if (!dc->links[link_index]->ddc->ddc_pin)
3733 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
3734 	else
3735 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3736 
3737 	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3738 	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3739 	cmd.dp_aux_access.aux_control.timeout = 0;
3740 	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3741 	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3742 	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3743 
3744 	/* set aux action */
3745 	if (payload->i2c_over_aux) {
3746 		if (payload->write) {
3747 			if (payload->mot)
3748 				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3749 			else
3750 				action = DP_AUX_REQ_ACTION_I2C_WRITE;
3751 		} else {
3752 			if (payload->mot)
3753 				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3754 			else
3755 				action = DP_AUX_REQ_ACTION_I2C_READ;
3756 		}
3757 	} else {
3758 		if (payload->write)
3759 			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3760 		else
3761 			action = DP_AUX_REQ_ACTION_DPCD_READ;
3762 	}
3763 
3764 	cmd.dp_aux_access.aux_control.dpaux.action = action;
3765 
3766 	if (payload->length && payload->write) {
3767 		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3768 			payload->data,
3769 			payload->length
3770 			);
3771 	}
3772 
3773 	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3774 	dc_dmub_srv_cmd_execute(dmub_srv);
3775 	dc_dmub_srv_wait_idle(dmub_srv);
3776 
3777 	return true;
3778 }
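
/*
 * Example (illustrative sketch only, not taken from an existing caller): a
 * DM-side user issuing an async one-byte DPCD read might fill the payload as
 * below; only the fields consumed above are shown, everything else is assumed
 * default-initialized, and the variable names are placeholders.
 *
 *	struct aux_payload payload = {0};
 *	uint8_t data;
 *
 *	payload.address = 0x600;	// DP_SET_POWER, as an example address
 *	payload.length = 1;
 *	payload.data = &data;
 *	payload.write = false;
 *	payload.i2c_over_aux = false;
 *
 *	if (dc_process_dmub_aux_transfer_async(dc, link_index, &payload))
 *		... reply is delivered later via the DMUB outbox ...
 */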
3779 
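/**
 * get_link_index_from_dpia_port_index - Find the dc link that uses a given DPIA port index
 * @dc: dc structure
 * @dpia_port_index: DPIA port index to look up
 *
 * Returns: the index of the matching DPIA link, or 0xFF (with an assert) if
 * no such link exists.
 */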
3780 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
3781 					    uint8_t dpia_port_index)
3782 {
3783 	uint8_t index, link_index = 0xFF;
3784 
3785 	for (index = 0; index < dc->link_count; index++) {
3786 		/* ddc_hw_inst has dpia port index for dpia links
3787 		 * and ddc instance for legacy links
3788 		 */
3789 		if (!dc->links[index]->ddc->ddc_pin) {
3790 			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
3791 				link_index = index;
3792 				break;
3793 			}
3794 		}
3795 	}
3796 	ASSERT(link_index != 0xFF);
3797 	return link_index;
3798 }
3799 
3800 /**
3801  * dc_process_dmub_set_config_async - Submits set_config command to dmub via inbox message
3802  * @dc: dc structure
3803  * @link_index: link index
3804  * @payload: set_config command payload
3805  * @notify: set_config immediate reply
3806  *
3807  * Returns: True if successful, False if failure
3808  */
3817 bool dc_process_dmub_set_config_async(struct dc *dc,
3818 				uint32_t link_index,
3819 				struct set_config_cmd_payload *payload,
3820 				struct dmub_notification *notify)
3821 {
3822 	union dmub_rb_cmd cmd = {0};
3823 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3824 	bool is_cmd_complete = true;
3825 
3826 	/* prepare SET_CONFIG command */
3827 	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
3828 	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
3829 
3830 	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
3831 	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
3832 	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
3833 
3834 	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
3835 		/* command is not processed by dmub */
3836 		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
3837 		return is_cmd_complete;
3838 	}
3839 
3840 	/* command processed by dmub, if ret_status is 1, it is completed instantly */
3841 	if (cmd.set_config_access.header.ret_status == 1)
3842 		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
3843 	else
3844 		/* cmd pending, will receive notification via outbox */
3845 		is_cmd_complete = false;
3846 
3847 	return is_cmd_complete;
3848 }
3849 
3850 /**
3851  * dc_process_dmub_set_mst_slots - Submits MST slot allocation command to dmub via inbox message
3852  * @dc: dc structure
3853  * @link_index: link index
3854  * @mst_alloc_slots: mst slots to be allotted
3855  * @mst_slots_in_use: mst slots in use, returned in the failure case
3856  *
3857  * Returns: DC_OK if successful, an error status otherwise
3858  */
3867 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
3868 				uint32_t link_index,
3869 				uint8_t mst_alloc_slots,
3870 				uint8_t *mst_slots_in_use)
3871 {
3872 	union dmub_rb_cmd cmd = {0};
3873 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3874 
3875 	/* prepare MST_ALLOC_SLOTS command */
3876 	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
3877 	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
3878 
3879 	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
3880 	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
3881 
3882 	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
3883 		/* command is not processed by dmub */
3884 		return DC_ERROR_UNEXPECTED;
3885 
3886 	/* command was processed by dmub, but it did not complete successfully */
3887 	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
3888 		/* command processing error */
3889 		return DC_ERROR_UNEXPECTED;
3890 
3891 	/* command processed and we have a status of 2, mst not enabled in dpia */
3892 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
3893 		return DC_FAIL_UNSUPPORTED_1;
3894 
3895 	/* previously configured mst alloc and used slots did not match */
3896 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
3897 		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
3898 		return DC_NOT_SUPPORTED;
3899 	}
3900 
3901 	return DC_OK;
3902 }
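
/*
 * Example (illustrative sketch only; variable names are placeholders): a
 * caller could distinguish the documented outcomes roughly as follows.
 *
 *	uint8_t slots_in_use;
 *
 *	switch (dc_process_dmub_set_mst_slots(dc, link_index, req_slots,
 *					       &slots_in_use)) {
 *	case DC_OK:			// slots allocated
 *		break;
 *	case DC_FAIL_UNSUPPORTED_1:	// MST not enabled on this dpia
 *		break;
 *	case DC_NOT_SUPPORTED:		// mismatch, slots_in_use is valid
 *		break;
 *	default:			// DC_ERROR_UNEXPECTED
 *		break;
 *	}
 */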
3903 
3904 /**
3905  * dc_disable_accelerated_mode - disable accelerated mode
3906  * @dc: dc structure
3907  */
3908 void dc_disable_accelerated_mode(struct dc *dc)
3909 {
3910 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
3911 }
3912 
3913 
3914 /**
3915  * dc_notify_vsync_int_state - Notifies vsync enable/disable state
3916  * @dc: dc structure
3917  * @stream: stream where vsync int state changed
3918  * @enable: whether vsync is enabled or disabled
3919  *
3920  * Called when vsync is enabled/disabled. Will notify DMUB to start/stop
3921  * ABM interrupts after steady state is reached.
3922  */
3926 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
3927 {
3928 	int i;
3929 	int edp_num;
3930 	struct pipe_ctx *pipe = NULL;
3931 	struct dc_link *link = stream->sink->link;
3932 	struct dc_link *edp_links[MAX_NUM_EDP];
3933 
3934 
3935 	if (link->psr_settings.psr_feature_enabled)
3936 		return;
3937 
3938 	/*find primary pipe associated with stream*/
3939 	for (i = 0; i < MAX_PIPES; i++) {
3940 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3941 
3942 		if (pipe->stream == stream && pipe->stream_res.tg)
3943 			break;
3944 	}
3945 
3946 	if (i == MAX_PIPES) {
3947 		ASSERT(0);
3948 		return;
3949 	}
3950 
3951 	get_edp_links(dc, edp_links, &edp_num);
3952 
3953 	/* Determine panel inst */
3954 	for (i = 0; i < edp_num; i++) {
3955 		if (edp_links[i] == link)
3956 			break;
3957 	}
3958 
3959 	if (i == edp_num)
3960 		return;
3962 
3963 	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
3964 		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
3965 }
3966