/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

// header file of functions being implemented
#include "dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"

static bool is_dual_plane(enum surface_pixel_format format)
{
	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}


uint32_t dcn32_helper_mall_bytes_to_ways(
		struct dc *dc,
		uint32_t total_size_in_mall_bytes)
{
	uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;

	/* add 2 lines for worst case alignment */
	cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;

	total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
	lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
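	/* Round up: a partially filled way still occupies a whole way. */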
	num_ways = cache_lines_used / lines_per_way;
	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	return num_ways;
}

uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool ignore_cursor_buf)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
	uint32_t cursor_mall_size_bytes = 0;

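	/* Scale the cursor footprint by the storage size of the cursor color format. */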
	switch (pipe_ctx->stream->cursor_attributes.color_format) {
	case CURSOR_MODE_MONO:
		cursor_size /= 2;
		break;
	case CURSOR_MODE_COLOR_1BIT_AND:
	case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
	case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
		cursor_size *= 4;
		break;

	case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
	case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
		cursor_size *= 8;
		break;
	}

	/* only count if the cursor is enabled and additional allocation is
	 * needed outside of the DCN cursor buffer
	 */
	if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
			cursor_size > 16384)) {
		/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
		 * Note: add 1 mblk in case of cursor misalignment
		 */
		cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
				DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
	}

	return cursor_mall_size_bytes;
}

/**
 * dcn32_helper_calculate_num_ways_for_subvp(): Calculate number of ways needed for SubVP
 *
 * Takes the total allocation required for the phantom viewport, as calculated
 * by DML in bytes, and converts it to a number of cache ways.
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: number of ways required for SubVP
 */
uint32_t dcn32_helper_calculate_num_ways_for_subvp(
		struct dc *dc,
		struct dc_state *context)
{
	if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
		if (dc->debug.force_subvp_num_ways) {
			return dc->debug.force_subvp_num_ways;
		} else {
			return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
		}
	} else {
		return 0;
	}
}

void dcn32_merge_pipes_for_subvp(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		// For now merge all pipes for SubVP since pipe split case isn't supported yet

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/* split off odm pipe */
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		}
	}
}

bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (!pipe->plane_state)
			return false;
	}
	return true;
}

bool dcn32_subvp_in_use(struct dc *dc,
		struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
			return true;
	}
	return false;
}

bool dcn32_mpo_in_use(struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->stream_status[i].plane_count > 1)
			return true;
	}
	return false;
}


bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
			return true;
	}
	return false;
}

bool dcn32_is_center_timing(struct pipe_ctx *pipe)
{
	bool is_center_timing = false;

	if (pipe->stream) {
		if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
				pipe->stream->timing.v_addressable != pipe->stream->src.height) {
			is_center_timing = true;
		}
	}

	if (pipe->plane_state) {
		if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
				pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
			is_center_timing = true;
		}
	}

	return is_center_timing;
}

bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
{
	bool psr_capable = false;

	if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
		psr_capable = true;
	}
	return psr_capable;
}

/**
 * dcn32_determine_det_override(): Determine DET allocation for each pipe
 *
 * This function determines how much DET to allocate for each pipe. The total number of
 * DET segments will be split equally among the streams, and after that the DET segments
 * per stream will be split equally among the planes for the given stream.
 *
 * If a plane is driven by more than one pipe (i.e. pipe split), then the DET allocation
 * for that plane will be split among the pipes driving it.
 *
 * High level algorithm:
 * 1. Split total DET among number of streams
 * 2. For each stream, split DET among the planes
 * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
 *    among those pipes.
 * 4. Assign the DET override to the DML pipes.
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @pipes: Array of DML pipes
 *
 * Return: void
 */
void dcn32_determine_det_override(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes)
{
	uint32_t i, j, k;
	uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
	uint8_t pipe_counted[MAX_PIPES] = {0};
	uint8_t pipe_cnt = 0;
	struct dc_plane_state *current_plane = NULL;
	uint8_t stream_count = 0;

	for (i = 0; i < context->stream_count; i++) {
		/* Don't count SubVP streams for DET allocation */
		if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
			stream_count++;
	}

	if (stream_count > 0) {
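		/* Note: 18 is the total number of DET segments being distributed,
		 * presumably DCN3_2_MAX_DET_SIZE / DCN3_2_DET_SEG_SIZE.
		 */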
		stream_segments = 18 / stream_count;
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
				continue;

			if (context->stream_status[i].plane_count > 0)
				plane_segments = stream_segments / context->stream_status[i].plane_count;
			else
				plane_segments = stream_segments;
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				pipe_plane_count = 0;
				if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
						pipe_counted[j] != 1) {
					/* Note: pipe_plane_count indicates the number of pipes to be used for a
					 * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
					 * pipe_plane_count = 2 means 2:1 split, etc.
					 */
					pipe_plane_count++;
					pipe_counted[j] = 1;
					current_plane = context->res_ctx.pipe_ctx[j].plane_state;
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_plane_count++;
							pipe_counted[k] = 1;
						}
					}

					pipe_segments[j] = plane_segments / pipe_plane_count;
					for (k = 0; k < dc->res_pool->pipe_count; k++) {
						if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
								context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
							pipe_segments[k] = plane_segments / pipe_plane_count;
						}
					}
				}
			}
		}

		for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;
			pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
			pipe_cnt++;
		}
	} else {
		for (i = 0; i < dc->res_pool->pipe_count; i++)
			pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
	}
}

void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe;
	bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		pipe = &res_ctx->pipe_ctx[i];
		pipe_cnt++;
	}

	/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
	 * the DET available for each pipe). Use the DET override input to maintain our driver
	 * policy.
	 */
	if (pipe_cnt == 1) {
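		/* Single active pipe: let it take the full DET by default. */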
		pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
		if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			if (!is_dual_plane(pipe->plane_state->format)) {
				pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
				pipes[0].pipe.src.unbounded_req_mode = true;
				if (pipe->plane_state->src_rect.width >= 5120 &&
					pipe->plane_state->src_rect.height >= 2880)
					pipes[0].pipe.src.det_size_override = 320; // 5K or higher
			}
		}
	} else
		dcn32_determine_det_override(dc, context, pipes);
}

/**
 * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
 *
 * This function saves the MALL (SubVP) state for fast validation cases. For fast validation,
 * there are situations where a shallow copy of the dc->current_state is created for the
 * validation. In this case we want to save and restore the mall config because we always
 * tear down subvp at the beginning of validation (and don't attempt to add it back if it's
 * fast validation). If we don't restore the subvp config in cases of fast validation +
 * shallow copy of the dc->current_state, the dc->current_state will end up with a partially
 * removed subvp state that we did not intend to remove.
 *
 * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
 *       validation. We don't expect this to happen in fast_validation=1 cases.
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @temp_config: struct used to cache the existing MALL state
 *
 * Return: void
 */
void dcn32_save_mall_state(struct dc *dc,
		struct dc_state *context,
		struct mall_temp_config *temp_config)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream)
			temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;

		if (pipe->plane_state)
			temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
	}
}

/**
 * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
 *
 * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed, restore MALL state into here
 * @temp_config: struct that has the cached MALL state
 *
 * Return: void
 */
void dcn32_restore_mall_state(struct dc *dc,
		struct dc_state *context,
		struct mall_temp_config *temp_config)
{
	uint32_t i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream)
			pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];

		if (pipe->plane_state)
			pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
	}
}

#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must match the value used in FW)
/*
 * Scaling factor for v_blank stretch calculations considering timing in
 * micro-seconds and pixel clock in 100hz.
 * Note: the parentheses are necessary to ensure the correct order of
 * operation where V_SCALE is used.
 */
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)

static int get_frame_rate_at_max_stretch_100hz(
		struct dc_stream_state *fpo_candidate_stream,
		uint32_t fpo_vactive_margin_us)
{
	struct dc_crtc_timing *timing = NULL;
	uint32_t sec_per_100_lines;
	uint32_t max_v_blank;
	uint32_t curr_v_blank;
	uint32_t v_stretch_max;
	uint32_t stretched_frame_pix_cnt;
	uint32_t scaled_stretched_frame_pix_cnt;
	uint32_t scaled_refresh_rate;
	uint32_t v_scale;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* check if refresh rate at least 120hz */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	v_scale = 10000 / (MAX_STRETCHED_V_BLANK + fpo_vactive_margin_us);

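	/* max_v_blank is (roughly) the number of lines that fit in the allowed
	 * stretch window (MAX_STRETCHED_V_BLANK plus any vactive margin),
	 * derived from the pixel clock given in 100hz units.
	 */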
	sec_per_100_lines = timing->pix_clk_100hz / timing->h_total + 1;
	max_v_blank = sec_per_100_lines / v_scale + 1;
	curr_v_blank = timing->v_total - timing->v_addressable;
	v_stretch_max = (max_v_blank > curr_v_blank) ? (max_v_blank - curr_v_blank) : (0);
	stretched_frame_pix_cnt = (v_stretch_max + timing->v_total) * timing->h_total;
	scaled_stretched_frame_pix_cnt = stretched_frame_pix_cnt / 10000;
	scaled_refresh_rate = (timing->pix_clk_100hz) / scaled_stretched_frame_pix_cnt + 1;

	return scaled_refresh_rate;

}

static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(
		struct dc_stream_state *fpo_candidate_stream, uint32_t fpo_vactive_margin_us)
{
	int refresh_rate_max_stretch_100hz;
	int min_refresh_100hz;

	if (fpo_candidate_stream == NULL)
		return false;

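	/* Both rates compared below are refresh rates scaled by 100 (the *_100hz convention). */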
	refresh_rate_max_stretch_100hz = get_frame_rate_at_max_stretch_100hz(fpo_candidate_stream, fpo_vactive_margin_us);
	min_refresh_100hz = fpo_candidate_stream->timing.min_refresh_in_uhz / 10000;

	if (refresh_rate_max_stretch_100hz < min_refresh_100hz)
		return false;

	return true;
}

static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
{
	int refresh_rate = 0;
	int h_v_total = 0;
	struct dc_crtc_timing *timing = NULL;

	if (fpo_candidate_stream == NULL)
		return 0;

	/* check if refresh rate at least 120hz */
	timing = &fpo_candidate_stream->timing;
	if (timing == NULL)
		return 0;

	h_v_total = timing->h_total * timing->v_total;
	if (h_v_total == 0)
		return 0;

	refresh_rate = ((timing->pix_clk_100hz * 100) / (h_v_total)) + 1;
	return refresh_rate;
}

/**
 * dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch() - Determines if config can
 *								    support FPO
 *
 * @dc: current dc state
 * @context: new dc state
 *
 * Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
 */
struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
{
	int refresh_rate = 0;
	const int minimum_refreshrate_supported = 120;
	struct dc_stream_state *fpo_candidate_stream = NULL;
	bool is_fpo_vactive = false;
	uint32_t fpo_vactive_margin_us = 0;

	if (context == NULL)
		return NULL;

	if (dc->debug.disable_fams)
		return NULL;

	if (!dc->caps.dmub_caps.mclk_sw)
		return NULL;

	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down)
		return NULL;

	/* For FPO we can support up to 2 display configs if:
	 * - first display uses FPO
	 * - Second display switches in VACTIVE */
	if (context->stream_count > 2)
		return NULL;
	else if (context->stream_count == 2) {
		DC_FP_START();
		dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
		DC_FP_END();

		DC_FP_START();
		is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
		DC_FP_END();
		if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
			return NULL;
	} else
		fpo_candidate_stream = context->streams[0];

	if (!fpo_candidate_stream)
		return NULL;

	if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
		return NULL;

	refresh_rate = get_refresh_rate(fpo_candidate_stream);
	if (refresh_rate < minimum_refreshrate_supported)
		return NULL;

	fpo_vactive_margin_us = is_fpo_vactive ? dc->debug.fpo_vactive_margin_us : 0; // For now hardcode the FPO + Vactive stretch margin to be 2000us
	if (!is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(fpo_candidate_stream, fpo_vactive_margin_us))
		return NULL;

	if (!fpo_candidate_stream->allow_freesync)
		return NULL;

	if (fpo_candidate_stream->vrr_active_variable && dc->debug.disable_fams_gaming)
		return NULL;

	return fpo_candidate_stream;
}

bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
{
	bool is_native_scaling = false;

	if (pipe->stream->timing.h_addressable == width &&
			pipe->stream->timing.v_addressable == height &&
			pipe->plane_state->src_rect.width == width &&
			pipe->plane_state->src_rect.height == height &&
			pipe->plane_state->dst_rect.width == width &&
			pipe->plane_state->dst_rect.height == height)
		is_native_scaling = true;

	return is_native_scaling;
}

/**
 * dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 *
 * SubVP + DRR is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must have Freesync enabled
 * - The potential DRR display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	bool drr_psr_capable = false;
	uint64_t refresh_rate = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
				subvp_count++;

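				/* Nominal refresh rate in hz (pixel clock / pixels per frame), rounded up */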
				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}
			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
				non_subvp_pipes++;
				drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
		((uint32_t)refresh_rate < 120))
		result = true;

	return result;
}

/**
 * dcn32_subvp_vblank_admissable() - Determine if SubVP + Vblank config is admissible
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 * @vlevel: Voltage level calculated by DML
 *
 * SubVP + Vblank is admissible under the following conditions:
 * - Config must have 2 displays (i.e., 2 non-phantom master pipes)
 * - One display is SubVP
 * - Other display must not have Freesync capability
 * - DML must have output DRAM clock change support as SubVP + Vblank
 * - The potential vblank display must not be PSR capable
 *
 * Return: True if admissible, false otherwise
 */
bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel)
{
	bool result = false;
	uint32_t i;
	uint8_t subvp_count = 0;
	uint8_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	bool vblank_psr_capable = false;
	uint64_t refresh_rate = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
				resource_is_pipe_type(pipe, DPP_PIPE)) {
			if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
				subvp_count++;

				refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
					pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
				refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
			}
			if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
				non_subvp_pipes++;
				vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
				if (pipe->stream->ignore_msa_timing_param &&
						(pipe->stream->allow_freesync || pipe->stream->vrr_active_variable)) {
					drr_pipe_found = true;
				}
			}
		}
	}

	if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
		((uint32_t)refresh_rate < 120) &&
		vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
		result = true;

	return result;
}