1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/slab.h>
27 
28 #include "dm_services.h"
29 #include "dc.h"
30 
31 #include "resource.h"
32 #include "include/irq_service_interface.h"
33 #include "dcn20/dcn20_resource.h"
34 
35 #include "dcn10/dcn10_hubp.h"
36 #include "dcn10/dcn10_ipp.h"
37 #include "dcn20_hubbub.h"
38 #include "dcn20_mpc.h"
39 #include "dcn20_hubp.h"
40 #include "irq/dcn20/irq_service_dcn20.h"
41 #include "dcn20_dpp.h"
42 #include "dcn20_optc.h"
43 #include "dcn20_hwseq.h"
44 #include "dce110/dce110_hw_sequencer.h"
45 #include "dcn10/dcn10_resource.h"
46 #include "dcn20_opp.h"
47 
48 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
49 #include "dcn20_dsc.h"
50 #endif
51 
52 #include "dcn20_link_encoder.h"
53 #include "dcn20_stream_encoder.h"
54 #include "dce/dce_clock_source.h"
55 #include "dce/dce_audio.h"
56 #include "dce/dce_hwseq.h"
57 #include "virtual/virtual_stream_encoder.h"
58 #include "dce110/dce110_resource.h"
59 #include "dml/display_mode_vba.h"
60 #include "dcn20_dccg.h"
61 #include "dcn20_vmid.h"
62 
63 #include "navi10_ip_offset.h"
64 
65 #include "dcn/dcn_2_0_0_offset.h"
66 #include "dcn/dcn_2_0_0_sh_mask.h"
67 
68 #include "nbio/nbio_2_3_offset.h"
69 
70 #include "dcn20/dcn20_dwb.h"
71 #include "dcn20/dcn20_mmhubbub.h"
72 
73 #include "mmhub/mmhub_2_0_0_offset.h"
74 #include "mmhub/mmhub_2_0_0_sh_mask.h"
75 
76 #include "reg_helper.h"
77 #include "dce/dce_abm.h"
78 #include "dce/dce_dmcu.h"
79 #include "dce/dce_aux.h"
80 #include "dce/dce_i2c.h"
81 #include "vm_helper.h"
82 
83 #include "amdgpu_socbb.h"
84 
85 /* NV12 SOC BB is currently in FW, mark SW bounding box invalid. */
86 #define SOC_BOUNDING_BOX_VALID false
87 #define DC_LOGGER_INIT(logger)
88 
/*
 * DML IP parameters for DCN 2.0 on Navi10-class parts (6 pipes,
 * 6 OTGs, 6 DSC engines when DSC support is compiled in).  Consumed
 * by the display mode library (display_mode_vba) for bandwidth and
 * watermark calculations.
 */
struct _vcs_dpi_ip_params_st dcn2_0_ip = {
	.odm_capable = 1,
	.gpuvm_enable = 0,
	.hostvm_enable = 0,
	.gpuvm_max_page_table_levels = 4,
	.hostvm_max_page_table_levels = 4,
	.hostvm_cached_page_table_levels = 0,
	.pte_group_size_bytes = 2048,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	.num_dsc = 6,
#else
	.num_dsc = 0,
#endif
	.rob_buffer_size_kbytes = 168,
	.det_buffer_size_kbytes = 164,
	.dpte_buffer_size_in_pte_reqs_luma = 84,
	.pde_proc_buffer_size_64k_reqs = 48,
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.pte_chunk_size_kbytes = 2,
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 2,
	.line_buffer_size_bits = 789504,
	.is_line_buffer_bpp_fixed = 0,
	.line_buffer_fixed_bpp = 0,
	.dcc_supported = true,
	.max_line_buffer_lines = 12,
	.writeback_luma_buffer_size_kbytes = 12,
	.writeback_chroma_buffer_size_kbytes = 8,
	.writeback_chroma_line_buffer_width_pixels = 4,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 12,
	.writeback_max_vscl_taps = 12,
	.writeback_line_buffer_luma_buffer_size = 0,
	.writeback_line_buffer_chroma_buffer_size = 14643,
	.cursor_buffer_size = 8,
	.cursor_chunk_size = 2,
	.max_num_otg = 6,
	.max_num_dpp = 6,
	.max_num_wb = 1,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 8,
	.max_vscl_ratio = 8,
	.hscl_mults = 4,
	.vscl_mults = 4,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.10,
	.min_vblank_lines = 32, //
	.dppclk_delay_subtotal = 77, //
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_scl = 50,
	.dppclk_delay_cnvc_formatter = 8,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 87, //
	.dcfclk_cstate_latency = 10, // SRExitTime
	.max_inter_dcn_tile_repeaters = 8,

	.xfc_supported = true,
	.xfc_fill_bw_overhead_percent = 10.0,
	.xfc_fill_constant_bytes = 0,
};
159 
/*
 * DML IP parameters for DCN 2.0 on Navi14 (5 pipes, 5 OTGs,
 * 5 DSC engines).  Mirrors dcn2_0_ip with the pipe counts reduced
 * and a few extra fields populated.
 */
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
	.odm_capable = 1,
	.gpuvm_enable = 0,
	.hostvm_enable = 0,
	.gpuvm_max_page_table_levels = 4,
	.hostvm_max_page_table_levels = 4,
	.hostvm_cached_page_table_levels = 0,
	.num_dsc = 5,
	.rob_buffer_size_kbytes = 168,
	.det_buffer_size_kbytes = 164,
	.dpte_buffer_size_in_pte_reqs_luma = 84,
	.dpte_buffer_size_in_pte_reqs_chroma = 42,//todo
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.pte_enable = 1,
	.max_page_table_levels = 4,
	.pte_chunk_size_kbytes = 2,
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 2,
	.line_buffer_size_bits = 789504,
	.is_line_buffer_bpp_fixed = 0,
	.line_buffer_fixed_bpp = 0,
	.dcc_supported = true,
	.max_line_buffer_lines = 12,
	.writeback_luma_buffer_size_kbytes = 12,
	.writeback_chroma_buffer_size_kbytes = 8,
	.writeback_chroma_line_buffer_width_pixels = 4,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 12,
	.writeback_max_vscl_taps = 12,
	.writeback_line_buffer_luma_buffer_size = 0,
	.writeback_line_buffer_chroma_buffer_size = 14643,
	.cursor_buffer_size = 8,
	.cursor_chunk_size = 2,
	.max_num_otg = 5,
	.max_num_dpp = 5,
	.max_num_wb = 1,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 8,
	.max_vscl_ratio = 8,
	.hscl_mults = 4,
	.vscl_mults = 4,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.10,
	.min_vblank_lines = 32, //
	.dppclk_delay_subtotal = 77, //
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_scl = 50,
	.dppclk_delay_cnvc_formatter = 8,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 87, //
	.dcfclk_cstate_latency = 10, // SRExitTime
	.max_inter_dcn_tile_repeaters = 8,
	.xfc_supported = true,
	.xfc_fill_bw_overhead_percent = 10.0,
	.xfc_fill_constant_bytes = 0,
	.ptoi_supported = 0
};
227 
/*
 * DML SOC bounding box for DCN 2.0.  The clock_limits table lists one
 * entry per DPM state; the values below are defaults that get patched
 * on driver load from firmware.
 */
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
	/* Defaults that get patched on driver load from firmware. */
	.clock_limits = {
			{
				.state = 0,
				.dcfclk_mhz = 560.0,
				.fabricclk_mhz = 560.0,
				.dispclk_mhz = 513.0,
				.dppclk_mhz = 513.0,
				.phyclk_mhz = 540.0,
				.socclk_mhz = 560.0,
				.dscclk_mhz = 171.0,
				.dram_speed_mts = 8960.0,
			},
			{
				.state = 1,
				.dcfclk_mhz = 694.0,
				.fabricclk_mhz = 694.0,
				.dispclk_mhz = 642.0,
				.dppclk_mhz = 642.0,
				.phyclk_mhz = 600.0,
				.socclk_mhz = 694.0,
				.dscclk_mhz = 214.0,
				.dram_speed_mts = 11104.0,
			},
			{
				.state = 2,
				.dcfclk_mhz = 875.0,
				.fabricclk_mhz = 875.0,
				.dispclk_mhz = 734.0,
				.dppclk_mhz = 734.0,
				.phyclk_mhz = 810.0,
				.socclk_mhz = 875.0,
				.dscclk_mhz = 245.0,
				.dram_speed_mts = 14000.0,
			},
			{
				.state = 3,
				.dcfclk_mhz = 1000.0,
				.fabricclk_mhz = 1000.0,
				.dispclk_mhz = 1100.0,
				.dppclk_mhz = 1100.0,
				.phyclk_mhz = 810.0,
				.socclk_mhz = 1000.0,
				.dscclk_mhz = 367.0,
				.dram_speed_mts = 16000.0,
			},
			{
				.state = 4,
				.dcfclk_mhz = 1200.0,
				.fabricclk_mhz = 1200.0,
				.dispclk_mhz = 1284.0,
				.dppclk_mhz = 1284.0,
				.phyclk_mhz = 810.0,
				.socclk_mhz = 1200.0,
				.dscclk_mhz = 428.0,
				.dram_speed_mts = 16000.0,
			},
			/*Extra state, no dispclk ramping*/
			{
				.state = 5,
				.dcfclk_mhz = 1200.0,
				.fabricclk_mhz = 1200.0,
				.dispclk_mhz = 1284.0,
				.dppclk_mhz = 1284.0,
				.phyclk_mhz = 810.0,
				.socclk_mhz = 1200.0,
				.dscclk_mhz = 428.0,
				.dram_speed_mts = 16000.0,
			},
		},
	/* 5 states; entry [5] above is the extra no-ramp copy of state 4. */
	.num_states = 5,
	.sr_exit_time_us = 8.6,
	.sr_enter_plus_exit_time_us = 10.9,
	.urgent_latency_us = 4.0,
	.urgent_latency_pixel_data_only_us = 4.0,
	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
	.urgent_latency_vm_data_only_us = 4.0,
	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
	.max_avg_sdp_bw_use_normal_percent = 40.0,
	.max_avg_dram_bw_use_normal_percent = 40.0,
	.writeback_latency_us = 12.0,
	.ideal_dram_bw_after_urgent_percent = 40.0,
	.max_request_size_bytes = 256,
	.dram_channel_width_bytes = 2,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.dcn_downspread_percent = 0.5,
	.downspread_percent = 0.38,
	.dram_page_open_time_ns = 50.0,
	.dram_rw_turnaround_time_ns = 17.5,
	.dram_return_buffer_per_channel_bytes = 8192,
	.round_trip_ping_latency_dcfclk_cycles = 131,
	.urgent_out_of_order_return_per_channel_bytes = 256,
	.channel_interleave_bytes = 256,
	.num_banks = 8,
	.num_chans = 16,
	.vmm_page_size_bytes = 4096,
	.dram_clock_change_latency_us = 404.0,
	.dummy_pstate_latency_us = 5.0,
	.writeback_dram_clock_change_latency_us = 23.0,
	.return_bus_width_bytes = 64,
	.dispclk_dppclk_vco_speed_mhz = 3850,
	.xfc_bus_transport_time_us = 20,
	.xfc_xbuf_latency_tolerance_us = 4,
	.use_urgent_burst_bw = 0
};

/* NV12 bounding box is zeroed here; it is populated from firmware at
 * init (see the SOC_BOUNDING_BOX_VALID note at the top of the file). */
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
341 
/*
 * Fallback definitions for the per-link DP DPHY internal control
 * registers, for register headers that do not provide them.
 */
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif
358 
359 
/* Indices of the six PLL clock sources in the resource pool's
 * clock-source array (see clk_src_regs[] below). */
enum dcn20_clk_src_array_id {
	DCN20_CLK_SRC_PLL0,
	DCN20_CLK_SRC_PLL1,
	DCN20_CLK_SRC_PLL2,
	DCN20_CLK_SRC_PLL3,
	DCN20_CLK_SRC_PLL4,
	DCN20_CLK_SRC_PLL5,
	DCN20_CLK_SRC_TOTAL
};
369 
/* begin *********************
 * macros to expand register list macros defined in HW object header files.
 * Each SR* macro computes a register's absolute address as
 * base-segment + per-register offset. */

/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

/* Single (non-instanced) register. */
#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
					mm ## reg_name

/* Instanced register: block name + instance id. */
#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

/* Like SRI but stores under a different struct field name. */
#define SRIR(var_name, reg_name, block, id)\
	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

/* Like SRI but fills an array slot indexed by instance id. */
#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define DCCG_SRII(reg_name, block, id)\
	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIO_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
		.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
					mmMM ## reg_name
421 
/* BIOS scratch registers used to hand off state with VBIOS. */
static const struct bios_registers bios_regs = {
		NBIO_SR(BIOS_SCRATCH_3),
		NBIO_SR(BIOS_SCRATCH_6)
};

/* Per-PLL clock source register sets (PLL0..PLL5 map to pllid A..F). */
#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D),
	clk_src_regs(4, E),
	clk_src_regs(5, F)
};

static const struct dce110_clk_src_shift cs_shift = {
		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
		CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

/* DMCU (display microcontroller) registers — DCN10 layout reused. */
static const struct dce_dmcu_registers dmcu_regs = {
		DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
		DMCU_MASK_SH_LIST_DCN10(_MASK)
};

/* ABM (adaptive backlight management) registers. */
static const struct dce_abm_registers abm_regs = {
		ABM_DCN20_REG_LIST()
};

static const struct dce_abm_shift abm_shift = {
		ABM_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
		ABM_MASK_SH_LIST_DCN20(_MASK)
};

/* Azalia audio endpoint register sets, one per audio instance. */
#define audio_regs(id)\
[id] = {\
		AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
	audio_regs(4),
	audio_regs(5),
	audio_regs(6),
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_mask = {
		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
500 
/* Stream encoder register sets, one per display pipe. */
#define stream_enc_regs(id)\
[id] = {\
	SE_DCN2_REG_LIST(id)\
}

static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
	stream_enc_regs(4),
	stream_enc_regs(5),
};

static const struct dcn10_stream_encoder_shift se_shift = {
		SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_stream_encoder_mask se_mask = {
		SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
};


/* DP AUX channel register sets, one per DDC line. */
#define aux_regs(id)\
[id] = {\
	DCN2_AUX_REG_LIST(id)\
}

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
		aux_regs(0),
		aux_regs(1),
		aux_regs(2),
		aux_regs(3),
		aux_regs(4),
		aux_regs(5)
};

/* Hot-plug detect register sets, one per HPD source. */
#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
		hpd_regs(0),
		hpd_regs(1),
		hpd_regs(2),
		hpd_regs(3),
		hpd_regs(4),
		hpd_regs(5)
};

/* Link encoder register sets: encoder id paired with its UNIPHY id. */
#define link_regs(id, phyid)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	UNIPHY_DCN2_REG_LIST(phyid), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dcn10_link_enc_registers link_enc_regs[] = {
	link_regs(0, A),
	link_regs(1, B),
	link_regs(2, C),
	link_regs(3, D),
	link_regs(4, E),
	link_regs(5, F)
};

static const struct dcn10_link_enc_shift le_shift = {
	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
};
575 
/* Input pixel processor register sets, one per pipe. */
#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN20(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {
	ipp_regs(0),
	ipp_regs(1),
	ipp_regs(2),
	ipp_regs(3),
	ipp_regs(4),
	ipp_regs(5),
};

static const struct dcn10_ipp_shift ipp_shift = {
		IPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
		IPP_MASK_SH_LIST_DCN20(_MASK),
};

/* Output pixel processor register sets, one per pipe. */
#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN20(id),\
}

static const struct dcn20_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
	opp_regs(4),
	opp_regs(5),
};

static const struct dcn20_opp_shift opp_shift = {
		OPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn20_opp_mask opp_mask = {
		OPP_MASK_SH_LIST_DCN20(_MASK)
};

/* AUX engine register sets; impedance-cal registers are not used here. */
#define aux_engine_regs(id)\
[id] = {\
	AUX_COMMON_REG_LIST0(id), \
	.AUXN_IMPCAL = 0, \
	.AUXP_IMPCAL = 0, \
	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}

static const struct dce110_aux_registers aux_engine_regs[] = {
		aux_engine_regs(0),
		aux_engine_regs(1),
		aux_engine_regs(2),
		aux_engine_regs(3),
		aux_engine_regs(4),
		aux_engine_regs(5)
};

/* Transform (DPP) register sets, one per pipe. */
#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN20(id),\
}

static const struct dcn2_dpp_registers tf_regs[] = {
	tf_regs(0),
	tf_regs(1),
	tf_regs(2),
	tf_regs(3),
	tf_regs(4),
	tf_regs(5),
};

static const struct dcn2_dpp_shift tf_shift = {
		TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
		TF_DEBUG_REG_LIST_SH_DCN10
};

static const struct dcn2_dpp_mask tf_mask = {
		TF_REG_LIST_SH_MASK_DCN20(_MASK),
		TF_DEBUG_REG_LIST_MASK_DCN10
};
660 
/* Display writeback controller register sets (one DWB on DCN 2.0). */
#define dwbc_regs_dcn2(id)\
[id] = {\
	DWBC_COMMON_REG_LIST_DCN2_0(id),\
		}

static const struct dcn20_dwbc_registers dwbc20_regs[] = {
	dwbc_regs_dcn2(0),
};

static const struct dcn20_dwbc_shift dwbc20_shift = {
	DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_dwbc_mask dwbc20_mask = {
	DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

/* MCIF writeback (MMHUBBUB) register sets. */
#define mcif_wb_regs_dcn2(id)\
[id] = {\
	MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
		}

static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
	mcif_wb_regs_dcn2(0),
};

static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

/* MPC registers: per-MPCC blend lists plus per-OPP output mux lists. */
static const struct dcn20_mpc_registers mpc_regs = {
		MPC_REG_LIST_DCN2_0(0),
		MPC_REG_LIST_DCN2_0(1),
		MPC_REG_LIST_DCN2_0(2),
		MPC_REG_LIST_DCN2_0(3),
		MPC_REG_LIST_DCN2_0(4),
		MPC_REG_LIST_DCN2_0(5),
		MPC_OUT_MUX_REG_LIST_DCN2_0(0),
		MPC_OUT_MUX_REG_LIST_DCN2_0(1),
		MPC_OUT_MUX_REG_LIST_DCN2_0(2),
		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
		MPC_OUT_MUX_REG_LIST_DCN2_0(4),
		MPC_OUT_MUX_REG_LIST_DCN2_0(5),
};

static const struct dcn20_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn20_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};
717 
/* Timing generator (OPTC) register sets, one per OTG. */
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN2_0(id)}


static const struct dcn_optc_registers tg_regs[] = {
	tg_regs(0),
	tg_regs(1),
	tg_regs(2),
	tg_regs(3),
	tg_regs(4),
	tg_regs(5)
};

static const struct dcn_optc_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dcn_optc_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

/* HUBP (display memory read pipeline) register sets, one per pipe. */
#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN20(id)\
}

static const struct dcn_hubp2_registers hubp_regs[] = {
		hubp_regs(0),
		hubp_regs(1),
		hubp_regs(2),
		hubp_regs(3),
		hubp_regs(4),
		hubp_regs(5)
};

static const struct dcn_hubp2_shift hubp_shift = {
		HUBP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn_hubp2_mask hubp_mask = {
		HUBP_MASK_SH_LIST_DCN20(_MASK)
};

/* HUBBUB (shared display memory hub) registers — single instance. */
static const struct dcn_hubbub_registers hubbub_reg = {
		HUBBUB_REG_LIST_DCN20(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
		HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
		HUBBUB_MASK_SH_LIST_DCN20(_MASK)
};

/* GPU VM context (VMID) register sets; 16 VMIDs on this family. */
#define vmid_regs(id)\
[id] = {\
		DCN20_VMID_REG_LIST(id)\
}

static const struct dcn_vmid_registers vmid_regs[] = {
	vmid_regs(0),
	vmid_regs(1),
	vmid_regs(2),
	vmid_regs(3),
	vmid_regs(4),
	vmid_regs(5),
	vmid_regs(6),
	vmid_regs(7),
	vmid_regs(8),
	vmid_regs(9),
	vmid_regs(10),
	vmid_regs(11),
	vmid_regs(12),
	vmid_regs(13),
	vmid_regs(14),
	vmid_regs(15)
};

static const struct dcn20_vmid_shift vmid_shifts = {
		DCN20_VMID_MASK_SH_LIST(__SHIFT)
};

static const struct dcn20_vmid_mask vmid_masks = {
		DCN20_VMID_MASK_SH_LIST(_MASK)
};

static const struct dce110_aux_registers_shift aux_shift = {
		DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
		DCN_AUX_MASK_SH_LIST(_MASK)
};
812 
813 static int map_transmitter_id_to_phy_instance(
814 	enum transmitter transmitter)
815 {
816 	switch (transmitter) {
817 	case TRANSMITTER_UNIPHY_A:
818 		return 0;
819 	break;
820 	case TRANSMITTER_UNIPHY_B:
821 		return 1;
822 	break;
823 	case TRANSMITTER_UNIPHY_C:
824 		return 2;
825 	break;
826 	case TRANSMITTER_UNIPHY_D:
827 		return 3;
828 	break;
829 	case TRANSMITTER_UNIPHY_E:
830 		return 4;
831 	break;
832 	case TRANSMITTER_UNIPHY_F:
833 		return 5;
834 	break;
835 	default:
836 		ASSERT(0);
837 		return 0;
838 	}
839 }
840 
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/* Display stream compression engine register sets, one per DSC. */
#define dsc_regsDCN20(id)\
[id] = {\
	DSC_REG_LIST_DCN20(id)\
}

static const struct dcn20_dsc_registers dsc_regs[] = {
	dsc_regsDCN20(0),
	dsc_regsDCN20(1),
	dsc_regsDCN20(2),
	dsc_regsDCN20(3),
	dsc_regsDCN20(4),
	dsc_regsDCN20(5)
};

static const struct dcn20_dsc_shift dsc_shift = {
	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};

static const struct dcn20_dsc_mask dsc_mask = {
	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
#endif

/* Display clock generator (DCCG) registers — single instance. */
static const struct dccg_registers dccg_regs = {
		DCCG_REG_LIST_DCN2()
};

static const struct dccg_shift dccg_shift = {
		DCCG_MASK_SH_LIST_DCN2(__SHIFT)
};

static const struct dccg_mask dccg_mask = {
		DCCG_MASK_SH_LIST_DCN2(_MASK)
};
876 
/* Resource counts for Navi10-class parts (6 pipes). */
static const struct resource_caps res_cap_nv10 = {
		.num_timing_generator = 6,
		.num_opp = 6,
		.num_video_plane = 6,
		.num_audio = 7,
		.num_stream_encoder = 6,
		.num_pll = 6,
		.num_dwb = 1,
		.num_ddc = 6,
		.num_vmid = 16,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
		.num_dsc = 6,
#endif
};

/* Plane capabilities advertised for every pipe: universal planes with
 * blending and per-pixel alpha; ARGB8888/NV12 scale up to 16x and down
 * to 1/4 (factor 250 = 25.0%); FP16 is not scalable (factor 1). */
static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.blends_with_above = true,
	.blends_with_below = true,
	.per_pixel_alpha = true,

	.pixel_format_support = {
			.argb8888 = true,
			.nv12 = true,
			.fp16 = true
	},

	.max_upscale_factor = {
			.argb8888 = 16000,
			.nv12 = 16000,
			.fp16 = 1
	},

	.max_downscale_factor = {
			.argb8888 = 250,
			.nv12 = 250,
			.fp16 = 1
	}
};
/* Resource counts for Navi14 (5 pipes). */
static const struct resource_caps res_cap_nv14 = {
		.num_timing_generator = 5,
		.num_opp = 5,
		.num_video_plane = 5,
		.num_audio = 6,
		.num_stream_encoder = 5,
		.num_pll = 5,
		.num_dwb = 1,
		.num_ddc = 5,
		.num_vmid = 16,
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
		.num_dsc = 5,
#endif
};
930 
/* Default debug options for the normal driver path. */
static const struct dc_debug_options debug_defaults_drv = {
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = false,
		.clock_trace = true,
		.disable_pplib_clock_request = true,
		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
		.force_single_disp_pipe_split = false,
		.disable_dcc = DCC_ENABLE,
		.vsr_support = true,
		.performance_trace = false,
		.max_downscale_src_width = 5120,/*upto 5K*/
		.disable_pplib_wm_range = false,
		.scl_reset_length10 = true,
		.sanity_checks = false,
		.disable_tri_buf = true,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};

/* Default debug options for diagnostics builds: power gating, clock
 * gating, stutter and pplib interaction all disabled for determinism. */
static const struct dc_debug_options debug_defaults_diags = {
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = true,
		.clock_trace = true,
		.disable_dpp_power_gate = true,
		.disable_hubp_power_gate = true,
		.disable_clock_gate = true,
		.disable_pplib_clock_request = true,
		.disable_pplib_wm_range = true,
		.disable_stutter = true,
		.scl_reset_length10 = true,
		.underflow_assert_delay_us = 0xFFFFFFFF,
};
964 
965 void dcn20_dpp_destroy(struct dpp **dpp)
966 {
967 	kfree(TO_DCN20_DPP(*dpp));
968 	*dpp = NULL;
969 }
970 
971 struct dpp *dcn20_dpp_create(
972 	struct dc_context *ctx,
973 	uint32_t inst)
974 {
975 	struct dcn20_dpp *dpp =
976 		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
977 
978 	if (!dpp)
979 		return NULL;
980 
981 	if (dpp2_construct(dpp, ctx, inst,
982 			&tf_regs[inst], &tf_shift, &tf_mask))
983 		return &dpp->base;
984 
985 	BREAK_TO_DEBUGGER();
986 	kfree(dpp);
987 	return NULL;
988 }
989 
990 struct input_pixel_processor *dcn20_ipp_create(
991 	struct dc_context *ctx, uint32_t inst)
992 {
993 	struct dcn10_ipp *ipp =
994 		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
995 
996 	if (!ipp) {
997 		BREAK_TO_DEBUGGER();
998 		return NULL;
999 	}
1000 
1001 	dcn20_ipp_construct(ipp, ctx, inst,
1002 			&ipp_regs[inst], &ipp_shift, &ipp_mask);
1003 	return &ipp->base;
1004 }
1005 
1006 
1007 struct output_pixel_processor *dcn20_opp_create(
1008 	struct dc_context *ctx, uint32_t inst)
1009 {
1010 	struct dcn20_opp *opp =
1011 		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
1012 
1013 	if (!opp) {
1014 		BREAK_TO_DEBUGGER();
1015 		return NULL;
1016 	}
1017 
1018 	dcn20_opp_construct(opp, ctx, inst,
1019 			&opp_regs[inst], &opp_shift, &opp_mask);
1020 	return &opp->base;
1021 }
1022 
1023 struct dce_aux *dcn20_aux_engine_create(
1024 	struct dc_context *ctx,
1025 	uint32_t inst)
1026 {
1027 	struct aux_engine_dce110 *aux_engine =
1028 		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
1029 
1030 	if (!aux_engine)
1031 		return NULL;
1032 
1033 	dce110_aux_engine_construct(aux_engine, ctx, inst,
1034 				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
1035 				    &aux_engine_regs[inst],
1036 					&aux_mask,
1037 					&aux_shift,
1038 					ctx->dc->caps.extended_aux_timeout_support);
1039 
1040 	return &aux_engine->base;
1041 }
/* I2C HW engine register sets.  Note the entries are positional, not
 * designated: array index 0 holds the register list for HW engine 1,
 * index 1 for engine 2, and so on. */
#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }

static const struct dce_i2c_registers i2c_hw_regs[] = {
		i2c_inst_regs(1),
		i2c_inst_regs(2),
		i2c_inst_regs(3),
		i2c_inst_regs(4),
		i2c_inst_regs(5),
		i2c_inst_regs(6),
};

static const struct dce_i2c_shift i2c_shifts = {
		I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};
1060 
1061 struct dce_i2c_hw *dcn20_i2c_hw_create(
1062 	struct dc_context *ctx,
1063 	uint32_t inst)
1064 {
1065 	struct dce_i2c_hw *dce_i2c_hw =
1066 		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
1067 
1068 	if (!dce_i2c_hw)
1069 		return NULL;
1070 
1071 	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
1072 				    &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
1073 
1074 	return dce_i2c_hw;
1075 }
1076 struct mpc *dcn20_mpc_create(struct dc_context *ctx)
1077 {
1078 	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
1079 					  GFP_KERNEL);
1080 
1081 	if (!mpc20)
1082 		return NULL;
1083 
1084 	dcn20_mpc_construct(mpc20, ctx,
1085 			&mpc_regs,
1086 			&mpc_shift,
1087 			&mpc_mask,
1088 			6);
1089 
1090 	return &mpc20->base;
1091 }
1092 
1093 struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
1094 {
1095 	int i;
1096 	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
1097 					  GFP_KERNEL);
1098 
1099 	if (!hubbub)
1100 		return NULL;
1101 
1102 	hubbub2_construct(hubbub, ctx,
1103 			&hubbub_reg,
1104 			&hubbub_shift,
1105 			&hubbub_mask);
1106 
1107 	for (i = 0; i < res_cap_nv10.num_vmid; i++) {
1108 		struct dcn20_vmid *vmid = &hubbub->vmid[i];
1109 
1110 		vmid->ctx = ctx;
1111 
1112 		vmid->regs = &vmid_regs[i];
1113 		vmid->shifts = &vmid_shifts;
1114 		vmid->masks = &vmid_masks;
1115 	}
1116 
1117 	return &hubbub->base;
1118 }
1119 
1120 struct timing_generator *dcn20_timing_generator_create(
1121 		struct dc_context *ctx,
1122 		uint32_t instance)
1123 {
1124 	struct optc *tgn10 =
1125 		kzalloc(sizeof(struct optc), GFP_KERNEL);
1126 
1127 	if (!tgn10)
1128 		return NULL;
1129 
1130 	tgn10->base.inst = instance;
1131 	tgn10->base.ctx = ctx;
1132 
1133 	tgn10->tg_regs = &tg_regs[instance];
1134 	tgn10->tg_shift = &tg_shift;
1135 	tgn10->tg_mask = &tg_mask;
1136 
1137 	dcn20_timing_generator_init(tgn10);
1138 
1139 	return &tgn10->base;
1140 }
1141 
/* Feature set advertised by every DCN 2.0 link encoder: 12-bit HDMI
 * deep color up to 600 MHz pixel clock, YCbCr 4:2:0 over both HDMI
 * and DP, and DP HBR2/HBR3 with TPS3/TPS4 training patterns. */
static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.hdmi_ycbcr420_supported = true,
		.dp_ycbcr420_supported = true,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true
};
1152 
/*
 * Allocate and construct a DCN 2.0 link encoder for the given init
 * data.  The register set is selected by mapping the transmitter to
 * its UNIPHY instance; AUX and HPD register sets are selected from
 * the init data directly.
 *
 * NOTE(review): link_enc_aux_regs is indexed with channel - 1, which
 * assumes enc_init_data->channel is 1-based and nonzero here —
 * confirm against callers.
 */
struct link_encoder *dcn20_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dcn20_link_encoder *enc20 =
		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
	int link_regs_id;

	if (!enc20)
		return NULL;

	link_regs_id =
		map_transmitter_id_to_phy_instance(enc_init_data->transmitter);

	dcn20_link_encoder_construct(enc20,
				      enc_init_data,
				      &link_enc_feature,
				      &link_enc_regs[link_regs_id],
				      &link_enc_aux_regs[enc_init_data->channel - 1],
				      &link_enc_hpd_regs[enc_init_data->hpd_source],
				      &le_shift,
				      &le_mask);

	return &enc20->enc10.base;
}
1177 
1178 struct clock_source *dcn20_clock_source_create(
1179 	struct dc_context *ctx,
1180 	struct dc_bios *bios,
1181 	enum clock_source_id id,
1182 	const struct dce110_clk_src_regs *regs,
1183 	bool dp_clk_src)
1184 {
1185 	struct dce110_clk_src *clk_src =
1186 		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
1187 
1188 	if (!clk_src)
1189 		return NULL;
1190 
1191 	if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
1192 			regs, &cs_shift, &cs_mask)) {
1193 		clk_src->base.dp_clk_src = dp_clk_src;
1194 		return &clk_src->base;
1195 	}
1196 
1197 	kfree(clk_src);
1198 	BREAK_TO_DEBUGGER();
1199 	return NULL;
1200 }
1201 
/*
 * Read the DC_PINSTRAPS_AUDIO hardware strap into @straps; used during
 * pool construction to decide audio resource availability.
 */
static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
1209 
1210 static struct audio *dcn20_create_audio(
1211 		struct dc_context *ctx, unsigned int inst)
1212 {
1213 	return dce_audio_create(ctx, inst,
1214 			&audio_regs[inst], &audio_shift, &audio_mask);
1215 }
1216 
1217 struct stream_encoder *dcn20_stream_encoder_create(
1218 	enum engine_id eng_id,
1219 	struct dc_context *ctx)
1220 {
1221 	struct dcn10_stream_encoder *enc1 =
1222 		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
1223 
1224 	if (!enc1)
1225 		return NULL;
1226 
1227 	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
1228 		if (eng_id >= ENGINE_ID_DIGD)
1229 			eng_id++;
1230 	}
1231 
1232 	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
1233 					&stream_enc_regs[eng_id],
1234 					&se_shift, &se_mask);
1235 
1236 	return &enc1->base;
1237 }
1238 
/* Register addresses, field shifts and field masks for the DCN2 hardware
 * sequencer, expanded from the shared HWSEQ_DCN2 list macros.
 */
static const struct dce_hwseq_registers hwseq_reg = {
		HWSEQ_DCN2_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
		HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
		HWSEQ_DCN2_MASK_SH_LIST(_MASK)
};
1250 
1251 struct dce_hwseq *dcn20_hwseq_create(
1252 	struct dc_context *ctx)
1253 {
1254 	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
1255 
1256 	if (hws) {
1257 		hws->ctx = ctx;
1258 		hws->regs = &hwseq_reg;
1259 		hws->shifts = &hwseq_shift;
1260 		hws->masks = &hwseq_mask;
1261 	}
1262 	return hws;
1263 }
1264 
/* Factory table used when constructing the resource pool on real hardware. */
static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = dcn20_create_audio,
	.create_stream_encoder = dcn20_stream_encoder_create,
	.create_hwseq = dcn20_hwseq_create,
};

/* Maximus variant (presumably emulation/diagnostics — confirm): only the
 * hardware sequencer is created; straps, audio and stream encoders are
 * skipped.
 */
static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn20_hwseq_create,
};
1278 
/* Forward declaration: defined later in this file, needed by destruct(). */
static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
1280 
1281 void dcn20_clock_source_destroy(struct clock_source **clk_src)
1282 {
1283 	kfree(TO_DCE110_CLK_SRC(*clk_src));
1284 	*clk_src = NULL;
1285 }
1286 
1287 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1288 
1289 struct display_stream_compressor *dcn20_dsc_create(
1290 	struct dc_context *ctx, uint32_t inst)
1291 {
1292 	struct dcn20_dsc *dsc =
1293 		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
1294 
1295 	if (!dsc) {
1296 		BREAK_TO_DEBUGGER();
1297 		return NULL;
1298 	}
1299 
1300 	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
1301 	return &dsc->base;
1302 }
1303 
1304 void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
1305 {
1306 	kfree(container_of(*dsc, struct dcn20_dsc, base));
1307 	*dsc = NULL;
1308 }
1309 
1310 #endif
1311 
1312 static void destruct(struct dcn20_resource_pool *pool)
1313 {
1314 	unsigned int i;
1315 
1316 	for (i = 0; i < pool->base.stream_enc_count; i++) {
1317 		if (pool->base.stream_enc[i] != NULL) {
1318 			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
1319 			pool->base.stream_enc[i] = NULL;
1320 		}
1321 	}
1322 
1323 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1324 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
1325 		if (pool->base.dscs[i] != NULL)
1326 			dcn20_dsc_destroy(&pool->base.dscs[i]);
1327 	}
1328 #endif
1329 
1330 	if (pool->base.mpc != NULL) {
1331 		kfree(TO_DCN20_MPC(pool->base.mpc));
1332 		pool->base.mpc = NULL;
1333 	}
1334 	if (pool->base.hubbub != NULL) {
1335 		kfree(pool->base.hubbub);
1336 		pool->base.hubbub = NULL;
1337 	}
1338 	for (i = 0; i < pool->base.pipe_count; i++) {
1339 		if (pool->base.dpps[i] != NULL)
1340 			dcn20_dpp_destroy(&pool->base.dpps[i]);
1341 
1342 		if (pool->base.ipps[i] != NULL)
1343 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
1344 
1345 		if (pool->base.hubps[i] != NULL) {
1346 			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
1347 			pool->base.hubps[i] = NULL;
1348 		}
1349 
1350 		if (pool->base.irqs != NULL) {
1351 			dal_irq_service_destroy(&pool->base.irqs);
1352 		}
1353 	}
1354 
1355 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
1356 		if (pool->base.engines[i] != NULL)
1357 			dce110_engine_destroy(&pool->base.engines[i]);
1358 		if (pool->base.hw_i2cs[i] != NULL) {
1359 			kfree(pool->base.hw_i2cs[i]);
1360 			pool->base.hw_i2cs[i] = NULL;
1361 		}
1362 		if (pool->base.sw_i2cs[i] != NULL) {
1363 			kfree(pool->base.sw_i2cs[i]);
1364 			pool->base.sw_i2cs[i] = NULL;
1365 		}
1366 	}
1367 
1368 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
1369 		if (pool->base.opps[i] != NULL)
1370 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
1371 	}
1372 
1373 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
1374 		if (pool->base.timing_generators[i] != NULL)	{
1375 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
1376 			pool->base.timing_generators[i] = NULL;
1377 		}
1378 	}
1379 
1380 	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
1381 		if (pool->base.dwbc[i] != NULL) {
1382 			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
1383 			pool->base.dwbc[i] = NULL;
1384 		}
1385 		if (pool->base.mcif_wb[i] != NULL) {
1386 			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
1387 			pool->base.mcif_wb[i] = NULL;
1388 		}
1389 	}
1390 
1391 	for (i = 0; i < pool->base.audio_count; i++) {
1392 		if (pool->base.audios[i])
1393 			dce_aud_destroy(&pool->base.audios[i]);
1394 	}
1395 
1396 	for (i = 0; i < pool->base.clk_src_count; i++) {
1397 		if (pool->base.clock_sources[i] != NULL) {
1398 			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
1399 			pool->base.clock_sources[i] = NULL;
1400 		}
1401 	}
1402 
1403 	if (pool->base.dp_clock_source != NULL) {
1404 		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
1405 		pool->base.dp_clock_source = NULL;
1406 	}
1407 
1408 
1409 	if (pool->base.abm != NULL)
1410 		dce_abm_destroy(&pool->base.abm);
1411 
1412 	if (pool->base.dmcu != NULL)
1413 		dce_dmcu_destroy(&pool->base.dmcu);
1414 
1415 	if (pool->base.dccg != NULL)
1416 		dcn_dccg_destroy(&pool->base.dccg);
1417 
1418 	if (pool->base.pp_smu != NULL)
1419 		dcn20_pp_smu_destroy(&pool->base.pp_smu);
1420 
1421 }
1422 
1423 struct hubp *dcn20_hubp_create(
1424 	struct dc_context *ctx,
1425 	uint32_t inst)
1426 {
1427 	struct dcn20_hubp *hubp2 =
1428 		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
1429 
1430 	if (!hubp2)
1431 		return NULL;
1432 
1433 	if (hubp2_construct(hubp2, ctx, inst,
1434 			&hubp_regs[inst], &hubp_shift, &hubp_mask))
1435 		return &hubp2->base;
1436 
1437 	BREAK_TO_DEBUGGER();
1438 	kfree(hubp2);
1439 	return NULL;
1440 }
1441 
1442 static void get_pixel_clock_parameters(
1443 	struct pipe_ctx *pipe_ctx,
1444 	struct pixel_clk_params *pixel_clk_params)
1445 {
1446 	const struct dc_stream_state *stream = pipe_ctx->stream;
1447 	struct pipe_ctx *odm_pipe;
1448 	int opp_cnt = 1;
1449 
1450 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
1451 		opp_cnt++;
1452 
1453 	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
1454 	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
1455 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
1456 	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
1457 	/* TODO: un-hardcode*/
1458 	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
1459 		LINK_RATE_REF_FREQ_IN_KHZ;
1460 	pixel_clk_params->flags.ENABLE_SS = 0;
1461 	pixel_clk_params->color_depth =
1462 		stream->timing.display_color_depth;
1463 	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
1464 	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
1465 
1466 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
1467 		pixel_clk_params->color_depth = COLOR_DEPTH_888;
1468 
1469 	if (opp_cnt == 4)
1470 		pixel_clk_params->requested_pix_clk_100hz /= 4;
1471 	else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
1472 		pixel_clk_params->requested_pix_clk_100hz /= 2;
1473 
1474 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1475 		pixel_clk_params->requested_pix_clk_100hz *= 2;
1476 
1477 }
1478 
1479 static void build_clamping_params(struct dc_stream_state *stream)
1480 {
1481 	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
1482 	stream->clamping.c_depth = stream->timing.display_color_depth;
1483 	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
1484 }
1485 
1486 static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
1487 {
1488 
1489 	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
1490 
1491 	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
1492 		pipe_ctx->clock_source,
1493 		&pipe_ctx->stream_res.pix_clk_params,
1494 		&pipe_ctx->pll_settings);
1495 
1496 	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
1497 
1498 	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1499 					&pipe_ctx->stream->bit_depth_params);
1500 	build_clamping_params(pipe_ctx->stream);
1501 
1502 	return DC_OK;
1503 }
1504 
1505 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
1506 {
1507 	enum dc_status status = DC_OK;
1508 	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
1509 
1510 	/*TODO Seems unneeded anymore */
1511 	/*	if (old_context && resource_is_stream_unchanged(old_context, stream)) {
1512 			if (stream != NULL && old_context->streams[i] != NULL) {
1513 				 todo: shouldn't have to copy missing parameter here
1514 				resource_build_bit_depth_reduction_params(stream,
1515 						&stream->bit_depth_params);
1516 				stream->clamping.pixel_encoding =
1517 						stream->timing.pixel_encoding;
1518 
1519 				resource_build_bit_depth_reduction_params(stream,
1520 								&stream->bit_depth_params);
1521 				build_clamping_params(stream);
1522 
1523 				continue;
1524 			}
1525 		}
1526 	*/
1527 
1528 	if (!pipe_ctx)
1529 		return DC_ERROR_UNEXPECTED;
1530 
1531 
1532 	status = build_pipe_hw_param(pipe_ctx);
1533 
1534 	return status;
1535 }
1536 
1537 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1538 
1539 static void acquire_dsc(struct resource_context *res_ctx,
1540 			const struct resource_pool *pool,
1541 			struct display_stream_compressor **dsc,
1542 			int pipe_idx)
1543 {
1544 	int i;
1545 
1546 	ASSERT(*dsc == NULL);
1547 	*dsc = NULL;
1548 
1549 	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
1550 		*dsc = pool->dscs[pipe_idx];
1551 		res_ctx->is_dsc_acquired[pipe_idx] = true;
1552 		return;
1553 	}
1554 
1555 	/* Find first free DSC */
1556 	for (i = 0; i < pool->res_cap->num_dsc; i++)
1557 		if (!res_ctx->is_dsc_acquired[i]) {
1558 			*dsc = pool->dscs[i];
1559 			res_ctx->is_dsc_acquired[i] = true;
1560 			break;
1561 		}
1562 }
1563 
1564 static void release_dsc(struct resource_context *res_ctx,
1565 			const struct resource_pool *pool,
1566 			struct display_stream_compressor **dsc)
1567 {
1568 	int i;
1569 
1570 	for (i = 0; i < pool->res_cap->num_dsc; i++)
1571 		if (pool->dscs[i] == *dsc) {
1572 			res_ctx->is_dsc_acquired[i] = false;
1573 			*dsc = NULL;
1574 			break;
1575 		}
1576 }
1577 
1578 #endif
1579 
1580 
1581 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1582 static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
1583 		struct dc_state *dc_ctx,
1584 		struct dc_stream_state *dc_stream)
1585 {
1586 	enum dc_status result = DC_OK;
1587 	int i;
1588 	const struct resource_pool *pool = dc->res_pool;
1589 
1590 	/* Get a DSC if required and available */
1591 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1592 		struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
1593 
1594 		if (pipe_ctx->stream != dc_stream)
1595 			continue;
1596 
1597 		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
1598 
1599 		/* The number of DSCs can be less than the number of pipes */
1600 		if (!pipe_ctx->stream_res.dsc) {
1601 			dm_output_to_console("No DSCs available\n");
1602 			result = DC_NO_DSC_RESOURCE;
1603 		}
1604 
1605 		break;
1606 	}
1607 
1608 	return result;
1609 }
1610 
1611 
1612 static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
1613 		struct dc_state *new_ctx,
1614 		struct dc_stream_state *dc_stream)
1615 {
1616 	struct pipe_ctx *pipe_ctx = NULL;
1617 	int i;
1618 
1619 	for (i = 0; i < MAX_PIPES; i++) {
1620 		if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
1621 			pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
1622 
1623 			if (pipe_ctx->stream_res.dsc)
1624 				release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
1625 		}
1626 	}
1627 
1628 	if (!pipe_ctx)
1629 		return DC_ERROR_UNEXPECTED;
1630 	else
1631 		return DC_OK;
1632 }
1633 #endif
1634 
1635 
1636 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1637 {
1638 	enum dc_status result = DC_ERROR_UNEXPECTED;
1639 
1640 	result = resource_map_pool_resources(dc, new_ctx, dc_stream);
1641 
1642 	if (result == DC_OK)
1643 		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
1644 
1645 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1646 	/* Get a DSC if required and available */
1647 	if (result == DC_OK && dc_stream->timing.flags.DSC)
1648 		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
1649 #endif
1650 
1651 	if (result == DC_OK)
1652 		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
1653 
1654 	return result;
1655 }
1656 
1657 
1658 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1659 {
1660 	enum dc_status result = DC_OK;
1661 
1662 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1663 	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
1664 #endif
1665 
1666 	return result;
1667 }
1668 
1669 
/*
 * Translate a DC surface swizzle mode into the equivalent DML
 * (display mode library) sw_mode enum.  The mapping is 1:1; an
 * unrecognized mode asserts and leaves *sw_mode unchanged.
 */
static void swizzle_to_dml_params(
		enum swizzle_mode_values swizzle,
		unsigned int *sw_mode)
{
	switch (swizzle) {
	case DC_SW_LINEAR:
		*sw_mode = dm_sw_linear;
		break;
	case DC_SW_4KB_S:
		*sw_mode = dm_sw_4kb_s;
		break;
	case DC_SW_4KB_S_X:
		*sw_mode = dm_sw_4kb_s_x;
		break;
	case DC_SW_4KB_D:
		*sw_mode = dm_sw_4kb_d;
		break;
	case DC_SW_4KB_D_X:
		*sw_mode = dm_sw_4kb_d_x;
		break;
	case DC_SW_64KB_S:
		*sw_mode = dm_sw_64kb_s;
		break;
	case DC_SW_64KB_S_X:
		*sw_mode = dm_sw_64kb_s_x;
		break;
	case DC_SW_64KB_S_T:
		*sw_mode = dm_sw_64kb_s_t;
		break;
	case DC_SW_64KB_D:
		*sw_mode = dm_sw_64kb_d;
		break;
	case DC_SW_64KB_D_X:
		*sw_mode = dm_sw_64kb_d_x;
		break;
	case DC_SW_64KB_D_T:
		*sw_mode = dm_sw_64kb_d_t;
		break;
	case DC_SW_64KB_R_X:
		*sw_mode = dm_sw_64kb_r_x;
		break;
	case DC_SW_VAR_S:
		*sw_mode = dm_sw_var_s;
		break;
	case DC_SW_VAR_S_X:
		*sw_mode = dm_sw_var_s_x;
		break;
	case DC_SW_VAR_D:
		*sw_mode = dm_sw_var_d;
		break;
	case DC_SW_VAR_D_X:
		*sw_mode = dm_sw_var_d_x;
		break;

	default:
		ASSERT(0); /* Not supported */
		break;
	}
}
1729 
/*
 * Split one pipe into an ODM (output data muxing) pair: @next_odm_pipe
 * becomes a copy of @prev_odm_pipe driving the right half of the screen,
 * each pipe's HACTIVE is halved, and viewport/recout are recomputed so
 * both halves together cover the original area.
 *
 * Returns false when either half would end up narrower than 16 pixels
 * or (with DSC enabled) no DSC could be acquired for the new pipe.
 * NOTE(review): on the failure paths the left pipe's scl_data has
 * already been modified — caller is presumably expected to discard the
 * context; confirm.
 */
bool dcn20_split_stream_for_odm(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *prev_odm_pipe,
		struct pipe_ctx *next_odm_pipe)
{
	int pipe_idx = next_odm_pipe->pipe_idx;

	/* Clone the source pipe, then restore/point the per-pipe resources. */
	*next_odm_pipe = *prev_odm_pipe;

	next_odm_pipe->pipe_idx = pipe_idx;
	next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	next_odm_pipe->stream_res.dsc = NULL;
#endif
	/* Insert the new pipe into the ODM chain right after prev_odm_pipe. */
	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
	}
	prev_odm_pipe->next_odm_pipe = next_odm_pipe;
	next_odm_pipe->prev_odm_pipe = prev_odm_pipe;
	ASSERT(next_odm_pipe->top_pipe == NULL);

	if (prev_odm_pipe->plane_state) {
		struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
		int new_width;

		/* HACTIVE halved for odm combine */
		sd->h_active /= 2;
		/* Calculate new vp and recout for left pipe */
		/* Need at least 16 pixels width per side */
		if (sd->recout.x + 16 >= sd->h_active)
			return false;
		new_width = sd->h_active - sd->recout.x;
		/* Shrink the viewport by the recout pixels lost, scaled by the
		 * horizontal ratio (separately for luma and chroma).
		 */
		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->recout.width - new_width));
		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->recout.width - new_width));
		sd->recout.width = new_width;

		/* Calculate new vp and recout for right pipe */
		sd = &next_odm_pipe->plane_res.scl_data;
		/* HACTIVE halved for odm combine */
		sd->h_active /= 2;
		/* Need at least 16 pixels width per side */
		if (new_width <= 16)
			return false;
		new_width = sd->recout.width + sd->recout.x - sd->h_active;
		sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->recout.width - new_width));
		sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->recout.width - new_width));
		sd->recout.width = new_width;
		/* Right pipe starts at x=0 of its half; shift the viewport by
		 * the pixels consumed on the left.
		 */
		sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz, sd->h_active - sd->recout.x));
		sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
				sd->ratios.horz_c, sd->h_active - sd->recout.x));
		sd->recout.x = 0;
	}
	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	if (next_odm_pipe->stream->timing.flags.DSC == 1) {
		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
		ASSERT(next_odm_pipe->stream_res.dsc);
		if (next_odm_pipe->stream_res.dsc == NULL)
			return false;
	}
#endif

	return true;
}
1806 
/*
 * Split one pipe into an MPC (multi-plane combine) pair: @secondary_pipe
 * becomes a copy of @primary_pipe, is linked below it in the blending
 * chain, and both pipes' scaling parameters are rebuilt for their new
 * shares of the plane.
 */
void dcn20_split_stream_for_mpc(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *primary_pipe,
		struct pipe_ctx *secondary_pipe)
{
	int pipe_idx = secondary_pipe->pipe_idx;
	/* Preserve the secondary's existing bottom link across the struct copy. */
	struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;

	*secondary_pipe = *primary_pipe;
	secondary_pipe->bottom_pipe = sec_bot_pipe;

	/* Re-point per-pipe HW resources to the secondary's own instances. */
	secondary_pipe->pipe_idx = pipe_idx;
	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	secondary_pipe->stream_res.dsc = NULL;
#endif
	/* Splice the secondary between the primary and its old bottom pipe. */
	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
		ASSERT(!secondary_pipe->bottom_pipe);
		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
	}
	primary_pipe->bottom_pipe = secondary_pipe;
	secondary_pipe->top_pipe = primary_pipe;

	ASSERT(primary_pipe->plane_state);
	resource_build_scaling_params(primary_pipe);
	resource_build_scaling_params(secondary_pipe);
}
1841 
1842 void dcn20_populate_dml_writeback_from_context(
1843 		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1844 {
1845 	int pipe_cnt, i;
1846 
1847 	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
1848 		struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
1849 
1850 		if (!res_ctx->pipe_ctx[i].stream)
1851 			continue;
1852 
1853 		/* Set writeback information */
1854 		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
1855 		pipes[pipe_cnt].dout.num_active_wb++;
1856 		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
1857 		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
1858 		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
1859 		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
1860 		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
1861 		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
1862 		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
1863 		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
1864 		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
1865 		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
1866 		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
1867 			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
1868 				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
1869 			else
1870 				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
1871 		} else
1872 			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
1873 
1874 		pipe_cnt++;
1875 	}
1876 
1877 }
1878 
1879 int dcn20_populate_dml_pipes_from_context(
1880 		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1881 {
1882 	int pipe_cnt, i;
1883 	bool synchronized_vblank = true;
1884 
1885 	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
1886 		if (!res_ctx->pipe_ctx[i].stream)
1887 			continue;
1888 
1889 		if (pipe_cnt < 0) {
1890 			pipe_cnt = i;
1891 			continue;
1892 		}
1893 		if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable(
1894 				res_ctx->pipe_ctx[pipe_cnt].stream,
1895 				res_ctx->pipe_ctx[i].stream)) {
1896 			synchronized_vblank = false;
1897 			break;
1898 		}
1899 	}
1900 
1901 	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
1902 		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
1903 		int output_bpc;
1904 
1905 		if (!res_ctx->pipe_ctx[i].stream)
1906 			continue;
1907 		/* todo:
1908 		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
1909 		pipes[pipe_cnt].pipe.src.dcc = 0;
1910 		pipes[pipe_cnt].pipe.src.vm = 0;*/
1911 
1912 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1913 		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
1914 		/* todo: rotation?*/
1915 		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
1916 #endif
1917 		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
1918 			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
1919 			/* 1/2 vblank */
1920 			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
1921 				(timing->v_total - timing->v_addressable
1922 					- timing->v_border_top - timing->v_border_bottom) / 2;
1923 			/* 36 bytes dp, 32 hdmi */
1924 			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
1925 				dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
1926 		}
1927 		pipes[pipe_cnt].pipe.src.dcc = false;
1928 		pipes[pipe_cnt].pipe.src.dcc_rate = 1;
1929 		pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
1930 		pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
1931 		pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
1932 				- timing->h_addressable
1933 				- timing->h_border_left
1934 				- timing->h_border_right;
1935 		pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
1936 		pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
1937 				- timing->v_addressable
1938 				- timing->v_border_top
1939 				- timing->v_border_bottom;
1940 		pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
1941 		pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
1942 		pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
1943 		pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
1944 		pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
1945 		pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
1946 		if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1947 			pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
1948 		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
1949 		pipes[pipe_cnt].dout.dp_lanes = 4;
1950 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
1951 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
1952 		pipes[pipe_cnt].pipe.dest.odm_combine = res_ctx->pipe_ctx[i].prev_odm_pipe
1953 							|| res_ctx->pipe_ctx[i].next_odm_pipe;
1954 		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
1955 		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
1956 				== res_ctx->pipe_ctx[i].plane_state)
1957 			pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
1958 		else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
1959 			struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
1960 
1961 			while (first_pipe->prev_odm_pipe)
1962 				first_pipe = first_pipe->prev_odm_pipe;
1963 			pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
1964 		}
1965 
1966 		switch (res_ctx->pipe_ctx[i].stream->signal) {
1967 		case SIGNAL_TYPE_DISPLAY_PORT_MST:
1968 		case SIGNAL_TYPE_DISPLAY_PORT:
1969 			pipes[pipe_cnt].dout.output_type = dm_dp;
1970 			break;
1971 		case SIGNAL_TYPE_EDP:
1972 			pipes[pipe_cnt].dout.output_type = dm_edp;
1973 			break;
1974 		case SIGNAL_TYPE_HDMI_TYPE_A:
1975 		case SIGNAL_TYPE_DVI_SINGLE_LINK:
1976 		case SIGNAL_TYPE_DVI_DUAL_LINK:
1977 			pipes[pipe_cnt].dout.output_type = dm_hdmi;
1978 			break;
1979 		default:
1980 			/* In case there is no signal, set dp with 4 lanes to allow max config */
1981 			pipes[pipe_cnt].dout.output_type = dm_dp;
1982 			pipes[pipe_cnt].dout.dp_lanes = 4;
1983 		}
1984 
1985 		switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
1986 		case COLOR_DEPTH_666:
1987 			output_bpc = 6;
1988 			break;
1989 		case COLOR_DEPTH_888:
1990 			output_bpc = 8;
1991 			break;
1992 		case COLOR_DEPTH_101010:
1993 			output_bpc = 10;
1994 			break;
1995 		case COLOR_DEPTH_121212:
1996 			output_bpc = 12;
1997 			break;
1998 		case COLOR_DEPTH_141414:
1999 			output_bpc = 14;
2000 			break;
2001 		case COLOR_DEPTH_161616:
2002 			output_bpc = 16;
2003 			break;
2004 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
2005 		case COLOR_DEPTH_999:
2006 			output_bpc = 9;
2007 			break;
2008 		case COLOR_DEPTH_111111:
2009 			output_bpc = 11;
2010 			break;
2011 #endif
2012 		default:
2013 			output_bpc = 8;
2014 			break;
2015 		}
2016 
2017 		switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
2018 		case PIXEL_ENCODING_RGB:
2019 		case PIXEL_ENCODING_YCBCR444:
2020 			pipes[pipe_cnt].dout.output_format = dm_444;
2021 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
2022 			break;
2023 		case PIXEL_ENCODING_YCBCR420:
2024 			pipes[pipe_cnt].dout.output_format = dm_420;
2025 			pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
2026 			break;
2027 		case PIXEL_ENCODING_YCBCR422:
2028 			if (true) /* todo */
2029 				pipes[pipe_cnt].dout.output_format = dm_s422;
2030 			else
2031 				pipes[pipe_cnt].dout.output_format = dm_n422;
2032 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
2033 			break;
2034 		default:
2035 			pipes[pipe_cnt].dout.output_format = dm_444;
2036 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
2037 		}
2038 
2039 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2040 		if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
2041 			pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
2042 #endif
2043 
2044 		/* todo: default max for now, until there is logic reflecting this in dc*/
2045 		pipes[pipe_cnt].dout.output_bpc = 12;
2046 		/*
2047 		 * Use max cursor settings for calculations to minimize
2048 		 * bw calculations due to cursor on/off
2049 		 */
2050 		pipes[pipe_cnt].pipe.src.num_cursors = 2;
2051 		pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
2052 		pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
2053 		pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
2054 		pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
2055 
2056 		if (!res_ctx->pipe_ctx[i].plane_state) {
2057 			pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
2058 			pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
2059 			pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
2060 			pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
2061 			if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
2062 				pipes[pipe_cnt].pipe.src.viewport_width = 1920;
2063 			pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
2064 			if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
2065 				pipes[pipe_cnt].pipe.src.viewport_height = 1080;
2066 			pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
2067 			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
2068 			pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
2069 			pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
2070 			pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width;  /*when is_hsplit != 1*/
2071 			pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
2072 			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
2073 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
2074 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
2075 			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
2076 			pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
2077 			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
2078 			pipes[pipe_cnt].pipe.src.is_hsplit = 0;
2079 			pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2080 			pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
2081 			pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
2082 		} else {
2083 			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
2084 			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
2085 
2086 			pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
2087 			pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
2088 					&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
2089 					|| (res_ctx->pipe_ctx[i].top_pipe
2090 					&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
2091 			pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
2092 					|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
2093 			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
2094 			pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
2095 			pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
2096 			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
2097 			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
2098 			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
2099 			if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
2100 				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
2101 				pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
2102 				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
2103 				pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
2104 			} else {
2105 				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch;
2106 				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch;
2107 			}
2108 			pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
2109 			pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
2110 			pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
2111 			pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
2112 			pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
2113 			if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
2114 				pipes[pipe_cnt].pipe.dest.full_recout_width +=
2115 						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
2116 				pipes[pipe_cnt].pipe.dest.full_recout_height +=
2117 						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
2118 			} else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
2119 				pipes[pipe_cnt].pipe.dest.full_recout_width +=
2120 						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
2121 				pipes[pipe_cnt].pipe.dest.full_recout_height +=
2122 						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
2123 			}
2124 
2125 			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
2126 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
2127 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
2128 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
2129 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
2130 			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
2131 					scl->ratios.vert.value != dc_fixpt_one.value
2132 					|| scl->ratios.horz.value != dc_fixpt_one.value
2133 					|| scl->ratios.vert_c.value != dc_fixpt_one.value
2134 					|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
2135 					|| dc->debug.always_scale; /*support always scale*/
2136 			pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
2137 			pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
2138 			pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
2139 			pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
2140 
2141 			pipes[pipe_cnt].pipe.src.macro_tile_size =
2142 					swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
2143 			swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
2144 					&pipes[pipe_cnt].pipe.src.sw_mode);
2145 
2146 			switch (pln->format) {
2147 			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
2148 			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
2149 				pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
2150 				break;
2151 			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
2152 			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
2153 				pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
2154 				break;
2155 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
2156 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
2157 			case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
2158 				pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
2159 				break;
2160 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
2161 			case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
2162 				pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
2163 				break;
2164 			case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
2165 				pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
2166 				break;
2167 			default:
2168 				pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
2169 				break;
2170 			}
2171 		}
2172 
2173 		pipe_cnt++;
2174 	}
2175 
2176 	/* populate writeback information */
2177 	dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
2178 
2179 	return pipe_cnt;
2180 }
2181 
2182 unsigned int dcn20_calc_max_scaled_time(
2183 		unsigned int time_per_pixel,
2184 		enum mmhubbub_wbif_mode mode,
2185 		unsigned int urgent_watermark)
2186 {
2187 	unsigned int time_per_byte = 0;
2188 	unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */
2189 	unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */
2190 	unsigned int small_free_entry, max_free_entry;
2191 	unsigned int buf_lh_capability;
2192 	unsigned int max_scaled_time;
2193 
2194 	if (mode == PACKED_444) /* packed mode */
2195 		time_per_byte = time_per_pixel/4;
2196 	else if (mode == PLANAR_420_8BPC)
2197 		time_per_byte  = time_per_pixel;
2198 	else if (mode == PLANAR_420_10BPC) /* p010 */
2199 		time_per_byte  = time_per_pixel * 819/1024;
2200 
2201 	if (time_per_byte == 0)
2202 		time_per_byte = 1;
2203 
2204 	small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
2205 	max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
2206 	buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
2207 	max_scaled_time   = buf_lh_capability - urgent_watermark;
2208 	return max_scaled_time;
2209 }
2210 
/*
 * dcn20_set_mcif_arb_params - program writeback (MCIF_WB) arbitration
 * parameters for every enabled DWB instance in @context.
 *
 * Walks all stream-owning pipes and, for each enabled writeback_info entry,
 * derives the wbif packing mode from the DWB output format/depth and fills
 * the corresponding mcif_wb_arb slot in the bw context with DML-derived
 * watermarks and a max scaled line time.  Returns early once all
 * MAX_DWB_PIPES arbitration slots are consumed.
 */
void dcn20_set_mcif_arb_params(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt)
{
	enum mmhubbub_wbif_mode wbif_mode;
	struct mcif_arb_params *wb_arb_params;
	int i, j, k, dwb_pipe;

	/* Writeback MCIF_WB arbitration parameters */
	dwb_pipe = 0;
	for (i = 0; i < dc->res_pool->pipe_count; i++) {

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		for (j = 0; j < MAX_DWB_PIPES; j++) {
			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
				continue;

			/* Arb params live in the global bw context, indexed by
			 * dwb instance — not in the per-stream writeback_info.
			 */
			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];

			/* Pick the wbif packing mode matching the DWB output. */
			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
				if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
					wbif_mode = PLANAR_420_8BPC;
				else
					wbif_mode = PLANAR_420_10BPC;
			} else
				wbif_mode = PACKED_444;

			/* Same DML-derived watermark written to all watermark sets. */
			for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
			}
			wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4 bit fraction, ms */
			wb_arb_params->slice_lines = 32;
			wb_arb_params->arbitration_slice = 2;
			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
				wbif_mode,
				wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */

			dwb_pipe++;

			/* All HW arbitration slots filled — nothing left to do. */
			if (dwb_pipe >= MAX_DWB_PIPES)
				return;
		}
		if (dwb_pipe >= MAX_DWB_PIPES)
			return;
	}
}
2263 
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
/*
 * dcn20_validate_dsc - validate the DSC configuration of every DSC-enabled
 * stream in @new_ctx against the DSC block's capabilities.
 *
 * Only the topmost pipe of each MPC/ODM chain is validated; the picture is
 * divided across the ODM chain, so pic_width and num_slices_h are scaled
 * down by the OPP count before being handed to dsc_validate_stream().
 *
 * Returns false on the first stream whose DSC config is rejected,
 * true if all DSC-enabled streams validate.  (DSC *count* validation is
 * done elsewhere.)
 */
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
	int i;

	/* Validate DSC config, dsc count validation is already done */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
		struct dc_stream_state *stream = pipe_ctx->stream;
		/* Zero-init so any dsc_config field we do not explicitly set
		 * below is not stack garbage when passed to the validator.
		 */
		struct dsc_config dsc_cfg = {0};
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;

		/* Only need to validate top pipe — skip before doing any work
		 * (previously the ODM chain was walked even for skipped pipes).
		 */
		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
			continue;

		/* Count OPPs across the ODM chain rooted at this pipe. */
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
				+ stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
				+ stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		/* Horizontal slices are split evenly across the ODM segments. */
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
			return false;
	}
	return true;
}
#endif
2299 
/*
 * dcn20_find_secondary_pipe - pick a free pipe to pair with @primary_pipe
 * for MPC split or ODM combine.
 *
 * Preference order, to minimize reprogramming across state transitions:
 *   1. The pipe that was this primary's bottom (MPC) or next-ODM pipe in
 *      the current (previous) dc state, if it is still unused.
 *   2. Searching backwards, a free pipe that was NOT a secondary in the
 *      previous state (keeps prior assignments stable).
 *   3. Any free pipe, searching backwards.
 *
 * Returns the chosen pipe with its pipe_idx set, or NULL if none is free.
 */
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *primary_pipe)
{
	struct pipe_ctx *secondary_pipe = NULL;

	if (dc && primary_pipe) {
		int j;
		int preferred_pipe_idx = 0;

		/* first check the prev dc state:
		 * if this primary pipe has a bottom pipe in prev. state
		 * and if the bottom pipe is still available (which it should be),
		 * pick that pipe as secondary
		 * Same logic applies for ODM pipes. Since mpo is not allowed with odm
		 * check in else case.
		 */
		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		} else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		}

		/*
		 * if this primary pipe does not have a bottom pipe in prev. state
		 * start backward and find a pipe that did not used to be a bottom pipe in
		 * prev. dc state. This way we make sure we keep the same assignment as
		 * last state and will not have to reprogram every pipe
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
						&& dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
					preferred_pipe_idx = j;

					if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
						secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
						secondary_pipe->pipe_idx = preferred_pipe_idx;
						break;
					}
				}
			}
		}
		/*
		 * We should never hit this assert unless assignments are shuffled around
		 * if this happens we will prob. hit a vsync tdr
		 */
		ASSERT(secondary_pipe);
		/*
		 * search backwards for the second pipe to keep pipe
		 * assignment more consistent
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				preferred_pipe_idx = j;

				if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
					secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
					secondary_pipe->pipe_idx = preferred_pipe_idx;
					break;
				}
			}
		}
	}

	return secondary_pipe;
}
2376 
/*
 * dcn20_merge_pipes_for_validate - undo previous ODM and MPC pipe splits in
 * @context so that mode-support validation can re-decide splitting from
 * scratch.
 *
 * First pass unlinks every ODM chain (keeping only the head pipe), releasing
 * any DSC attached to the detached segments and clearing their plane/stream
 * resources.  Second pass merges MPC (hsplit) pairs back into the top pipe.
 * Scaling params are rebuilt on each surviving pipe that still has a plane.
 */
void dcn20_merge_pipes_for_validate(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	/* merge previously split odm pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;

		/* Only process chain heads; segments are handled via the head. */
		if (pipe->prev_odm_pipe)
			continue;

		pipe->next_odm_pipe = NULL;
		while (odm_pipe) {
			/* Save the link before we wipe the segment's pointers. */
			struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;

			odm_pipe->plane_state = NULL;
			odm_pipe->stream = NULL;
			odm_pipe->top_pipe = NULL;
			odm_pipe->bottom_pipe = NULL;
			odm_pipe->prev_odm_pipe = NULL;
			odm_pipe->next_odm_pipe = NULL;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
			/* Release the DSC before stream_res is cleared below. */
			if (odm_pipe->stream_res.dsc)
				release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
#endif
			/* Clear plane_res and stream_res */
			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
			odm_pipe = next_odm_pipe;
		}
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}

	/* merge previously mpc split pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		/* MPC split means top and bottom share the same plane_state. */
		if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
			continue;

		/* Splice the split pipe out of the top/bottom chain. */
		pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
		if (hsplit_pipe->bottom_pipe)
			hsplit_pipe->bottom_pipe->top_pipe = pipe;
		hsplit_pipe->plane_state = NULL;
		hsplit_pipe->stream = NULL;
		hsplit_pipe->top_pipe = NULL;
		hsplit_pipe->bottom_pipe = NULL;

		/* Clear plane_res and stream_res */
		memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
		memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}
}
2437 
/*
 * dcn20_validate_apply_pipe_split_flags - decide per-pipe split flags from
 * DML results and debug policy, possibly adjusting the voltage level.
 *
 * @vlevel: voltage level chosen by DML mode support
 * @split:  out array (indexed by pipe index) set true where a pipe
 *          should be split (MPC or ODM)
 *
 * Returns the (possibly raised) voltage level to use.
 */
int dcn20_validate_apply_pipe_split_flags(
		struct dc *dc,
		struct dc_state *context,
		int vlevel,
		bool *split)
{
	int i, pipe_idx, vlevel_split;
	bool force_split = false;
	bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;

	/* Single display loop, exits if there is more than one display */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		bool exit_loop = false;

		if (!pipe->stream || pipe->top_pipe)
			continue;

		/* force_split only applies when exactly one display is present:
		 * the second stream found flips it back off and exits.
		 */
		if (dc->debug.force_single_disp_pipe_split) {
			if (!force_split)
				force_split = true;
			else {
				force_split = false;
				exit_loop = true;
			}
		}
		/* Same toggle trick: avoid_split kicks in only once a second
		 * display is seen under MPC_SPLIT_AVOID_MULT_DISP policy.
		 */
		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
			if (avoid_split)
				avoid_split = false;
			else {
				avoid_split = true;
				exit_loop = true;
			}
		}
		if (exit_loop)
			break;
	}
	/* TODO: fix dc bugs and remove this split threshold thing */
	if (context->stream_count > dc->res_pool->pipe_count / 2)
		avoid_split = true;

	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
	if (avoid_split) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;

			/* Raise vlevel until this pipe needs only one DPP;
			 * vlevel_split remembers the level we started from.
			 */
			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
				if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
					break;
			/* Impossible to not split this pipe */
			if (vlevel > context->bw_ctx.dml.soc.num_states)
				vlevel = vlevel_split;
			pipe_idx++;
		}
		/* maxMpcComb 0 = the no-combine DML result column */
		context->bw_ctx.dml.vba.maxMpcComb = 0;
	}

	/* Split loop sets which pipe should be split based on dml outputs and dc flags */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
			split[i] = true;
		/* Stereo 3D side-by-side / top-and-bottom timings always split. */
		if ((pipe->stream->view_format ==
				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
				pipe->stream->view_format ==
				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
				(pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
				 pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_SIDE_BY_SIDE))
			split[i] = true;
		/* Debug knob: force ODM combine per-OTG via a bitmask. */
		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
			split[i] = true;
			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
		}
		context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
		/* Adjust dppclk when split is forced, do not bother with dispclk */
		if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
		pipe_idx++;
	}

	return vlevel;
}
2528 
/*
 * dcn20_fast_validate_bw - run DML mode support on @context and apply the
 * resulting pipe split / ODM combine decisions.
 *
 * Steps: merge any previously split pipes, populate the DML pipe array,
 * ask DML for a voltage level, compute split flags, then physically split
 * pipes (acquiring secondary pipes) for ODM and MPC as required.
 *
 * @pipe_cnt_out:    number of DML pipes populated
 * @pipe_split_from: per-pipe index of the pipe it was split from, or -1
 * @vlevel_out:      validated voltage level
 *
 * Returns true on success (including the trivial zero-pipe case),
 * false if no voltage level works or splitting/DSC validation fails.
 */
bool dcn20_fast_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *pipe_split_from,
		int *vlevel_out)
{
	bool out = false;
	bool split[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;

	ASSERT(pipes);
	if (!pipes)
		return false;

	dcn20_merge_pipes_for_validate(dc, context);

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes);

	*pipe_cnt_out = pipe_cnt;

	/* No active pipes: trivially valid. */
	if (!pipe_cnt) {
		out = true;
		goto validate_out;
	}

	vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

	/* Mode not supported at any voltage level. */
	if (vlevel > context->bw_ctx.dml.soc.num_states)
		goto validate_fail;

	vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);

	/*initialize pipe_just_split_from to invalid idx*/
	for (i = 0; i < MAX_PIPES; i++)
		pipe_split_from[i] = -1;

	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		/* Skip empty pipes and second halves created by this loop. */
		if (!pipe->stream || pipe_split_from[i] >= 0)
			continue;

		pipe_idx++;

		/* Blank (no-plane) stream that needs ODM combine. */
		if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
			hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
			ASSERT(hsplit_pipe);
			if (!dcn20_split_stream_for_odm(
					&context->res_ctx, dc->res_pool,
					pipe, hsplit_pipe))
				goto validate_fail;
			pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			dcn20_build_mapped_resource(dc, context, pipe->stream);
		}

		if (!pipe->plane_state)
			continue;
		/* Skip 2nd half of already split pipe */
		if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
			continue;

		/* We do not support mpo + odm at the moment */
		if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
			goto validate_fail;

		if (split[i]) {
			if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
				/* pipe not split previously needs split */
				hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
				ASSERT(hsplit_pipe);
				if (!hsplit_pipe) {
					/* No free pipe: run unsplit at double dppclk instead. */
					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2;
					continue;
				}
				if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
					if (!dcn20_split_stream_for_odm(
							&context->res_ctx, dc->res_pool,
							pipe, hsplit_pipe))
						goto validate_fail;
					dcn20_build_mapped_resource(dc, context, pipe->stream);
				} else
					dcn20_split_stream_for_mpc(
						&context->res_ctx, dc->res_pool,
						pipe, hsplit_pipe);
				pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			}
		} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
			/* merge should already have been done */
			ASSERT(0);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	/* Actual dsc count per stream dsc validation*/
	if (!dcn20_validate_dsc(dc, context)) {
		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
				DML_FAIL_DSC_VALIDATION_FAILURE;
		goto validate_fail;
	}
#endif

	*vlevel_out = vlevel;

	out = true;
	goto validate_out;

validate_fail:
	out = false;

validate_out:
	return out;
}
2644 
2645 static void dcn20_calculate_wm(
2646 		struct dc *dc, struct dc_state *context,
2647 		display_e2e_pipe_params_st *pipes,
2648 		int *out_pipe_cnt,
2649 		int *pipe_split_from,
2650 		int vlevel)
2651 {
2652 	int pipe_cnt, i, pipe_idx;
2653 
2654 	for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
2655 		if (!context->res_ctx.pipe_ctx[i].stream)
2656 			continue;
2657 
2658 		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
2659 		pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2660 
2661 		if (pipe_split_from[i] < 0) {
2662 			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2663 					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
2664 			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
2665 				pipes[pipe_cnt].pipe.dest.odm_combine =
2666 						context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx];
2667 			else
2668 				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2669 			pipe_idx++;
2670 		} else {
2671 			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2672 					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
2673 			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
2674 				pipes[pipe_cnt].pipe.dest.odm_combine =
2675 						context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]];
2676 			else
2677 				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2678 		}
2679 
2680 		if (dc->config.forced_clocks) {
2681 			pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
2682 			pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2683 		}
2684 		if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000)
2685 			pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
2686 		if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000)
2687 			pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
2688 
2689 		pipe_cnt++;
2690 	}
2691 
2692 	if (pipe_cnt != pipe_idx) {
2693 		if (dc->res_pool->funcs->populate_dml_pipes)
2694 			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
2695 				&context->res_ctx, pipes);
2696 		else
2697 			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
2698 				&context->res_ctx, pipes);
2699 	}
2700 
2701 	*out_pipe_cnt = pipe_cnt;
2702 
2703 	pipes[0].clks_cfg.voltage = vlevel;
2704 	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2705 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2706 
2707 	/* only pipe 0 is read for voltage and dcf/soc clocks */
2708 	if (vlevel < 1) {
2709 		pipes[0].clks_cfg.voltage = 1;
2710 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
2711 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
2712 	}
2713 	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2714 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2715 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2716 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2717 	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2718 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2719 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2720 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2721 	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2722 #endif
2723 
2724 	if (vlevel < 2) {
2725 		pipes[0].clks_cfg.voltage = 2;
2726 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
2727 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
2728 	}
2729 	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2730 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2731 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2732 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2733 	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2734 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2735 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2736 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2737 #endif
2738 
2739 	if (vlevel < 3) {
2740 		pipes[0].clks_cfg.voltage = 3;
2741 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
2742 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
2743 	}
2744 	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2745 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2746 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2747 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2748 	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2749 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2750 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2751 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2752 #endif
2753 
2754 	pipes[0].clks_cfg.voltage = vlevel;
2755 	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2756 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2757 	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2758 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2759 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2760 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2761 	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2762 #if defined(CONFIG_DRM_AMD_DC_DCN2_1)
2763 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2764 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2765 #endif
2766 }
2767 
/*
 * Extract per-pipe DLG (delay generator) parameters and clock values from a
 * completed DML run and store them into the DC state.
 *
 * @dc:       DC instance (provides the resource pool and pipe count)
 * @context:  validated DC state; bw_ctx.dml.vba must already hold DML results
 * @pipes:    DML pipe array populated during validation
 * @pipe_cnt: number of valid entries in @pipes
 * @vlevel:   voltage level selected by validation (indexes vba tables)
 */
void dcn20_calculate_dlg_params(
		struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt,
		int vlevel)
{
	int i, j, pipe_idx, pipe_idx_unsplit;
	/* Tracks which entries of pipes[] already received vstartup/vupdate
	 * values, so split halves are filled exactly once. */
	bool visited[MAX_PIPES] = { 0 };

	/* Writeback MCIF_WB arbitration parameters */
	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);

	/* DML reports clocks in MHz (DRAMSpeed in MT/s); DC state stores kHz. */
	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
	context->bw_ctx.bw.dcn.clk.p_state_change_support =
		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
							!= dm_dram_clock_change_unsupported;
	/* dppclk is derived below as the max over all active pipes. */
	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;

	/*
	 * An artifact of dml pipe split/odm is that pipes get merged back together for
	 * calculation. Therefore we need to only extract for first pipe in ascending index order
	 * and copy into the other split half.
	 */
	for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (!visited[pipe_idx]) {
			display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src;
			display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest;

			/* vba tables are indexed by unsplit (merged) pipe index. */
			dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
			dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
			dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
			dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
			/*
			 * j iterates inside pipes array, unlike i which iterates inside
			 * pipe_ctx array
			 */
			if (src->is_hsplit)
				/* Copy the same timing into every other half of the
				 * same hsplit group so both halves agree. */
				for (j = pipe_idx + 1; j < pipe_cnt; j++) {
					display_pipe_source_params_st *src_j = &pipes[j].pipe.src;
					display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest;

					if (src_j->is_hsplit && !visited[j]
							&& src->hsplit_grp == src_j->hsplit_grp) {
						dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit];
						dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit];
						dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit];
						dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit];
						visited[j] = true;
					}
				}
			visited[pipe_idx] = true;
			pipe_idx_unsplit++;
		}
		pipe_idx++;
	}

	/* Second pass: per-pipe dppclk, take the max as the context dppclk. */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;
		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
		/* Every active pipe must have been filled by the first pass. */
		ASSERT(visited[pipe_idx]);
		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
		pipe_idx++;
	}
	/*save a original dppclock copy*/
	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;

	/* Third pass: compute the DLG/TTU/RQ register values per pipe. */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		/* PrefetchMode 2 means no c-state allowed during prefetch. */
		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
				&context->res_ctx.pipe_ctx[i].dlg_regs,
				&context->res_ctx.pipe_ctx[i].ttu_regs,
				pipes,
				pipe_cnt,
				pipe_idx,
				cstate_en,
				context->bw_ctx.bw.dcn.clk.p_state_change_support,
				false, false, false);

		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
				&context->res_ctx.pipe_ctx[i].rq_regs,
				pipes[pipe_idx].pipe);
		pipe_idx++;
	}
}
2871 
2872 static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context,
2873 		bool fast_validate)
2874 {
2875 	bool out = false;
2876 
2877 	BW_VAL_TRACE_SETUP();
2878 
2879 	int vlevel = 0;
2880 	int pipe_split_from[MAX_PIPES];
2881 	int pipe_cnt = 0;
2882 	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
2883 	DC_LOGGER_INIT(dc->ctx->logger);
2884 
2885 	BW_VAL_TRACE_COUNT();
2886 
2887 	out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel);
2888 
2889 	if (pipe_cnt == 0)
2890 		goto validate_out;
2891 
2892 	if (!out)
2893 		goto validate_fail;
2894 
2895 	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
2896 
2897 	if (fast_validate) {
2898 		BW_VAL_TRACE_SKIP(fast);
2899 		goto validate_out;
2900 	}
2901 
2902 	dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel);
2903 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
2904 
2905 	BW_VAL_TRACE_END_WATERMARKS();
2906 
2907 	goto validate_out;
2908 
2909 validate_fail:
2910 	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
2911 		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
2912 
2913 	BW_VAL_TRACE_SKIP(fail);
2914 	out = false;
2915 
2916 validate_out:
2917 	kfree(pipes);
2918 
2919 	BW_VAL_TRACE_FINISH();
2920 
2921 	return out;
2922 }
2923 
2924 
2925 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
2926 		bool fast_validate)
2927 {
2928 	bool voltage_supported = false;
2929 	bool full_pstate_supported = false;
2930 	bool dummy_pstate_supported = false;
2931 	double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
2932 
2933 	if (fast_validate)
2934 		return dcn20_validate_bandwidth_internal(dc, context, true);
2935 
2936 
2937 	// Best case, we support full UCLK switch latency
2938 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
2939 	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2940 
2941 	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
2942 		(voltage_supported && full_pstate_supported)) {
2943 		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
2944 		goto restore_dml_state;
2945 	}
2946 
2947 	// Fallback: Try to only support G6 temperature read latency
2948 	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
2949 
2950 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
2951 	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
2952 
2953 	if (voltage_supported && dummy_pstate_supported) {
2954 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
2955 		goto restore_dml_state;
2956 	}
2957 
2958 	// ERROR: fallback is supposed to always work.
2959 	ASSERT(false);
2960 
2961 restore_dml_state:
2962 	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
2963 
2964 	return voltage_supported;
2965 }
2966 
2967 struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
2968 		struct dc_state *state,
2969 		const struct resource_pool *pool,
2970 		struct dc_stream_state *stream)
2971 {
2972 	struct resource_context *res_ctx = &state->res_ctx;
2973 	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
2974 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
2975 
2976 	if (!head_pipe)
2977 		ASSERT(0);
2978 
2979 	if (!idle_pipe)
2980 		return NULL;
2981 
2982 	idle_pipe->stream = head_pipe->stream;
2983 	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
2984 	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
2985 
2986 	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
2987 	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
2988 	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
2989 	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
2990 
2991 	return idle_pipe;
2992 }
2993 
2994 bool dcn20_get_dcc_compression_cap(const struct dc *dc,
2995 		const struct dc_dcc_surface_param *input,
2996 		struct dc_surface_dcc_cap *output)
2997 {
2998 	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
2999 			dc->res_pool->hubbub,
3000 			input,
3001 			output);
3002 }
3003 
3004 static void dcn20_destroy_resource_pool(struct resource_pool **pool)
3005 {
3006 	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
3007 
3008 	destruct(dcn20_pool);
3009 	kfree(dcn20_pool);
3010 	*pool = NULL;
3011 }
3012 
3013 
/* Capability-query hooks exposed to core DC for DCN20. */
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
3017 
3018 
3019 enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
3020 {
3021 	enum dc_status result = DC_OK;
3022 
3023 	enum surface_pixel_format surf_pix_format = plane_state->format;
3024 	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
3025 
3026 	enum swizzle_mode_values swizzle = DC_SW_LINEAR;
3027 
3028 	if (bpp == 64)
3029 		swizzle = DC_SW_64KB_D;
3030 	else
3031 		swizzle = DC_SW_64KB_S;
3032 
3033 	plane_state->tiling_info.gfx9.swizzle = swizzle;
3034 	return result;
3035 }
3036 
/* Resource-pool level callbacks wired into core DC for DCN20. */
static struct resource_funcs dcn20_res_pool_funcs = {
	.destroy = dcn20_destroy_resource_pool,
	.link_enc_create = dcn20_link_encoder_create,
	.validate_bandwidth = dcn20_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
	.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
3050 
3051 bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
3052 {
3053 	int i;
3054 	uint32_t pipe_count = pool->res_cap->num_dwb;
3055 
3056 	for (i = 0; i < pipe_count; i++) {
3057 		struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
3058 						    GFP_KERNEL);
3059 
3060 		if (!dwbc20) {
3061 			dm_error("DC: failed to create dwbc20!\n");
3062 			return false;
3063 		}
3064 		dcn20_dwbc_construct(dwbc20, ctx,
3065 				&dwbc20_regs[i],
3066 				&dwbc20_shift,
3067 				&dwbc20_mask,
3068 				i);
3069 		pool->dwbc[i] = &dwbc20->base;
3070 	}
3071 	return true;
3072 }
3073 
3074 bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
3075 {
3076 	int i;
3077 	uint32_t pipe_count = pool->res_cap->num_dwb;
3078 
3079 	ASSERT(pipe_count > 0);
3080 
3081 	for (i = 0; i < pipe_count; i++) {
3082 		struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
3083 						    GFP_KERNEL);
3084 
3085 		if (!mcif_wb20) {
3086 			dm_error("DC: failed to create mcif_wb20!\n");
3087 			return false;
3088 		}
3089 
3090 		dcn20_mmhubbub_construct(mcif_wb20, ctx,
3091 				&mcif_wb20_regs[i],
3092 				&mcif_wb20_shift,
3093 				&mcif_wb20_mask,
3094 				i);
3095 
3096 		pool->mcif_wb[i] = &mcif_wb20->base;
3097 	}
3098 	return true;
3099 }
3100 
3101 static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
3102 {
3103 	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
3104 
3105 	if (!pp_smu)
3106 		return pp_smu;
3107 
3108 	dm_pp_get_funcs(ctx, pp_smu);
3109 
3110 	if (pp_smu->ctx.ver != PP_SMU_VER_NV)
3111 		pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
3112 
3113 	return pp_smu;
3114 }
3115 
3116 static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
3117 {
3118 	if (pp_smu && *pp_smu) {
3119 		kfree(*pp_smu);
3120 		*pp_smu = NULL;
3121 	}
3122 }
3123 
3124 void dcn20_cap_soc_clocks(
3125 		struct _vcs_dpi_soc_bounding_box_st *bb,
3126 		struct pp_smu_nv_clock_table max_clocks)
3127 {
3128 	int i;
3129 
3130 	// First pass - cap all clocks higher than the reported max
3131 	for (i = 0; i < bb->num_states; i++) {
3132 		if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
3133 				&& max_clocks.dcfClockInKhz != 0)
3134 			bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
3135 
3136 		if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
3137 						&& max_clocks.uClockInKhz != 0)
3138 			bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
3139 
3140 		if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
3141 						&& max_clocks.fabricClockInKhz != 0)
3142 			bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
3143 
3144 		if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
3145 						&& max_clocks.displayClockInKhz != 0)
3146 			bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
3147 
3148 		if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
3149 						&& max_clocks.dppClockInKhz != 0)
3150 			bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
3151 
3152 		if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
3153 						&& max_clocks.phyClockInKhz != 0)
3154 			bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
3155 
3156 		if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
3157 						&& max_clocks.socClockInKhz != 0)
3158 			bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
3159 
3160 		if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
3161 						&& max_clocks.dscClockInKhz != 0)
3162 			bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
3163 	}
3164 
3165 	// Second pass - remove all duplicate clock states
3166 	for (i = bb->num_states - 1; i > 1; i--) {
3167 		bool duplicate = true;
3168 
3169 		if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
3170 			duplicate = false;
3171 		if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
3172 			duplicate = false;
3173 		if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
3174 			duplicate = false;
3175 		if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
3176 			duplicate = false;
3177 		if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
3178 			duplicate = false;
3179 		if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
3180 			duplicate = false;
3181 		if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
3182 			duplicate = false;
3183 		if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
3184 			duplicate = false;
3185 
3186 		if (duplicate)
3187 			bb->num_states--;
3188 	}
3189 }
3190 
/*
 * Rebuild the bounding-box clock states from the SMU's UCLK DPM states.
 *
 * @bb:         SoC bounding box to overwrite (clock_limits + num_states)
 * @max_clocks: SMU-reported maximum sustainable clocks (kHz)
 * @uclk_states: per-DPM-state UCLK values in kHz
 * @num_states: number of entries in @uclk_states; no-op when 0
 */
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
	struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
	int i;
	int num_calculated_states = 0;
	int min_dcfclk = 0;

	if (num_states == 0)
		return;

	memset(calculated_states, 0, sizeof(calculated_states));

	if (dc->bb_overrides.min_dcfclk_mhz > 0)
		min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
	else {
		if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
			min_dcfclk = 310;
		else
			// Accounting for SOC/DCF relationship, we can go as high as
			// 506Mhz in Vmin.
			min_dcfclk = 506;
	}

	for (i = 0; i < num_states; i++) {
		int min_fclk_required_by_uclk;
		calculated_states[i].state = i;
		/* uclk kHz -> MT/s: x16 transfers per clock, /1000 for MHz. */
		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;

		// FCLK:UCLK ratio is 1.08
		/* 64-bit fixed-point multiply: uclk * 1.08 without FP math. */
		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);

		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
				min_dcfclk : min_fclk_required_by_uclk;

		/* SOC and DCF follow fabric but are capped at their SMU maximums. */
		calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
				max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
				max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;

		calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
		calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
		/* DSC clock runs at 1/3 of display clock. */
		calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);

		calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;

		num_calculated_states++;
	}

	/* Force the top state to the absolute maximums.
	 * NOTE(review): fabricclk is set from socClockInKhz here, not
	 * fabricClockInKhz - looks intentional (SOC-limited) but verify. */
	calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000;
	calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000;

	memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
	bb->num_states = num_calculated_states;

	// Duplicate the last state, DML always an extra state identical to max state to work
	memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
	bb->clock_limits[num_calculated_states].state = bb->num_states;
}
3252 
3253 void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
3254 {
3255 	kernel_fpu_begin();
3256 	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
3257 			&& dc->bb_overrides.sr_exit_time_ns) {
3258 		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
3259 	}
3260 
3261 	if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
3262 				!= dc->bb_overrides.sr_enter_plus_exit_time_ns
3263 			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
3264 		bb->sr_enter_plus_exit_time_us =
3265 				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
3266 	}
3267 
3268 	if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
3269 			&& dc->bb_overrides.urgent_latency_ns) {
3270 		bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
3271 	}
3272 
3273 	if ((int)(bb->dram_clock_change_latency_us * 1000)
3274 				!= dc->bb_overrides.dram_clock_change_latency_ns
3275 			&& dc->bb_overrides.dram_clock_change_latency_ns) {
3276 		bb->dram_clock_change_latency_us =
3277 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
3278 	}
3279 	kernel_fpu_end();
3280 }
3281 
3282 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
3283 	uint32_t hw_internal_rev)
3284 {
3285 	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
3286 		return &dcn2_0_nv12_soc;
3287 
3288 	return &dcn2_0_soc;
3289 }
3290 
3291 static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
3292 	uint32_t hw_internal_rev)
3293 {
3294 	/* NV14 */
3295 	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
3296 		return &dcn2_0_nv14_ip;
3297 
3298 	/* NV12 and NV10 */
3299 	return &dcn2_0_ip;
3300 }
3301 
/* All DCN2.0 ASIC revisions currently map to the same DML project version;
 * the revision parameter is kept for symmetry with the other selectors. */
static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
	return DML_PROJECT_NAVI10v2;
}
3306 
/*
 * Convert a 16.16 fixed-point value to double. The argument is fully
 * parenthesized so an expression argument (e.g. a / b) is evaluated as a
 * whole instead of having the cast bind only to its first operand.
 */
#define fixed16_to_double(x) (((double)(x)) / ((double) (1 << 16)))
/* Same conversion for little-endian 32-bit values from the firmware blob. */
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
3309 
3310 static bool init_soc_bounding_box(struct dc *dc,
3311 				  struct dcn20_resource_pool *pool)
3312 {
3313 	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
3314 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
3315 			get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
3316 	struct _vcs_dpi_ip_params_st *loaded_ip =
3317 			get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
3318 
3319 	DC_LOGGER_INIT(dc->ctx->logger);
3320 
3321 	if (!bb && !SOC_BOUNDING_BOX_VALID) {
3322 		DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__);
3323 		return false;
3324 	}
3325 
3326 	if (bb && !SOC_BOUNDING_BOX_VALID) {
3327 		int i;
3328 
3329 		dcn2_0_nv12_soc.sr_exit_time_us =
3330 				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
3331 		dcn2_0_nv12_soc.sr_enter_plus_exit_time_us =
3332 				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
3333 		dcn2_0_nv12_soc.urgent_latency_us =
3334 				fixed16_to_double_to_cpu(bb->urgent_latency_us);
3335 		dcn2_0_nv12_soc.urgent_latency_pixel_data_only_us =
3336 				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
3337 		dcn2_0_nv12_soc.urgent_latency_pixel_mixed_with_vm_data_us =
3338 				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
3339 		dcn2_0_nv12_soc.urgent_latency_vm_data_only_us =
3340 				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
3341 		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
3342 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
3343 		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
3344 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
3345 		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
3346 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
3347 		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
3348 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
3349 		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
3350 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
3351 		dcn2_0_nv12_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
3352 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
3353 		dcn2_0_nv12_soc.max_avg_sdp_bw_use_normal_percent =
3354 				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
3355 		dcn2_0_nv12_soc.max_avg_dram_bw_use_normal_percent =
3356 				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
3357 		dcn2_0_nv12_soc.writeback_latency_us =
3358 				fixed16_to_double_to_cpu(bb->writeback_latency_us);
3359 		dcn2_0_nv12_soc.ideal_dram_bw_after_urgent_percent =
3360 				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
3361 		dcn2_0_nv12_soc.max_request_size_bytes =
3362 				le32_to_cpu(bb->max_request_size_bytes);
3363 		dcn2_0_nv12_soc.dram_channel_width_bytes =
3364 				le32_to_cpu(bb->dram_channel_width_bytes);
3365 		dcn2_0_nv12_soc.fabric_datapath_to_dcn_data_return_bytes =
3366 				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
3367 		dcn2_0_nv12_soc.dcn_downspread_percent =
3368 				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
3369 		dcn2_0_nv12_soc.downspread_percent =
3370 				fixed16_to_double_to_cpu(bb->downspread_percent);
3371 		dcn2_0_nv12_soc.dram_page_open_time_ns =
3372 				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
3373 		dcn2_0_nv12_soc.dram_rw_turnaround_time_ns =
3374 				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
3375 		dcn2_0_nv12_soc.dram_return_buffer_per_channel_bytes =
3376 				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
3377 		dcn2_0_nv12_soc.round_trip_ping_latency_dcfclk_cycles =
3378 				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
3379 		dcn2_0_nv12_soc.urgent_out_of_order_return_per_channel_bytes =
3380 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
3381 		dcn2_0_nv12_soc.channel_interleave_bytes =
3382 				le32_to_cpu(bb->channel_interleave_bytes);
3383 		dcn2_0_nv12_soc.num_banks =
3384 				le32_to_cpu(bb->num_banks);
3385 		dcn2_0_nv12_soc.num_chans =
3386 				le32_to_cpu(bb->num_chans);
3387 		dcn2_0_nv12_soc.vmm_page_size_bytes =
3388 				le32_to_cpu(bb->vmm_page_size_bytes);
3389 		dcn2_0_nv12_soc.dram_clock_change_latency_us =
3390 				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
3391 		// HACK!! Lower uclock latency switch time so we don't switch
3392 		dcn2_0_nv12_soc.dram_clock_change_latency_us = 10;
3393 		dcn2_0_nv12_soc.writeback_dram_clock_change_latency_us =
3394 				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
3395 		dcn2_0_nv12_soc.return_bus_width_bytes =
3396 				le32_to_cpu(bb->return_bus_width_bytes);
3397 		dcn2_0_nv12_soc.dispclk_dppclk_vco_speed_mhz =
3398 				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
3399 		dcn2_0_nv12_soc.xfc_bus_transport_time_us =
3400 				le32_to_cpu(bb->xfc_bus_transport_time_us);
3401 		dcn2_0_nv12_soc.xfc_xbuf_latency_tolerance_us =
3402 				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
3403 		dcn2_0_nv12_soc.use_urgent_burst_bw =
3404 				le32_to_cpu(bb->use_urgent_burst_bw);
3405 		dcn2_0_nv12_soc.num_states =
3406 				le32_to_cpu(bb->num_states);
3407 
3408 		for (i = 0; i < dcn2_0_nv12_soc.num_states; i++) {
3409 			dcn2_0_nv12_soc.clock_limits[i].state =
3410 					le32_to_cpu(bb->clock_limits[i].state);
3411 			dcn2_0_nv12_soc.clock_limits[i].dcfclk_mhz =
3412 					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
3413 			dcn2_0_nv12_soc.clock_limits[i].fabricclk_mhz =
3414 					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
3415 			dcn2_0_nv12_soc.clock_limits[i].dispclk_mhz =
3416 					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
3417 			dcn2_0_nv12_soc.clock_limits[i].dppclk_mhz =
3418 					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
3419 			dcn2_0_nv12_soc.clock_limits[i].phyclk_mhz =
3420 					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
3421 			dcn2_0_nv12_soc.clock_limits[i].socclk_mhz =
3422 					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
3423 			dcn2_0_nv12_soc.clock_limits[i].dscclk_mhz =
3424 					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
3425 			dcn2_0_nv12_soc.clock_limits[i].dram_speed_mts =
3426 					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
3427 		}
3428 	}
3429 
3430 	if (pool->base.pp_smu) {
3431 		struct pp_smu_nv_clock_table max_clocks = {0};
3432 		unsigned int uclk_states[8] = {0};
3433 		unsigned int num_states = 0;
3434 		enum pp_smu_status status;
3435 		bool clock_limits_available = false;
3436 		bool uclk_states_available = false;
3437 
3438 		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
3439 			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
3440 				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
3441 
3442 			uclk_states_available = (status == PP_SMU_RESULT_OK);
3443 		}
3444 
3445 		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
3446 			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
3447 					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
3448 			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
3449 			 */
3450 			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
3451 				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
3452 			clock_limits_available = (status == PP_SMU_RESULT_OK);
3453 		}
3454 
3455 		if (clock_limits_available && uclk_states_available && num_states)
3456 			dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
3457 		else if (clock_limits_available)
3458 			dcn20_cap_soc_clocks(loaded_bb, max_clocks);
3459 	}
3460 
3461 	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
3462 	loaded_ip->max_num_dpp = pool->base.pipe_count;
3463 	dcn20_patch_bounding_box(dc, loaded_bb);
3464 
3465 	return true;
3466 }
3467 
3468 static bool construct(
3469 	uint8_t num_virtual_links,
3470 	struct dc *dc,
3471 	struct dcn20_resource_pool *pool)
3472 {
3473 	int i;
3474 	struct dc_context *ctx = dc->ctx;
3475 	struct irq_service_init_data init_data;
3476 	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
3477 			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
3478 	struct _vcs_dpi_ip_params_st *loaded_ip =
3479 			get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
3480 	enum dml_project dml_project_version =
3481 			get_dml_project_version(ctx->asic_id.hw_internal_rev);
3482 
3483 	ctx->dc_bios->regs = &bios_regs;
3484 	pool->base.funcs = &dcn20_res_pool_funcs;
3485 
3486 	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
3487 		pool->base.res_cap = &res_cap_nv14;
3488 		pool->base.pipe_count = 5;
3489 		pool->base.mpcc_count = 5;
3490 	} else {
3491 		pool->base.res_cap = &res_cap_nv10;
3492 		pool->base.pipe_count = 6;
3493 		pool->base.mpcc_count = 6;
3494 	}
3495 	/*************************************************
3496 	 *  Resource + asic cap harcoding                *
3497 	 *************************************************/
3498 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
3499 
3500 	dc->caps.max_downscale_ratio = 200;
3501 	dc->caps.i2c_speed_in_khz = 100;
3502 	dc->caps.max_cursor_size = 256;
3503 	dc->caps.dmdata_alloc_size = 2048;
3504 
3505 	dc->caps.max_slave_planes = 1;
3506 	dc->caps.post_blend_color_processing = true;
3507 	dc->caps.force_dp_tps4_for_cp2520 = true;
3508 	dc->caps.hw_3d_lut = true;
3509 	dc->caps.extended_aux_timeout_support = true;
3510 
3511 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
3512 		dc->debug = debug_defaults_drv;
3513 	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
3514 		pool->base.pipe_count = 4;
3515 		pool->base.mpcc_count = pool->base.pipe_count;
3516 		dc->debug = debug_defaults_diags;
3517 	} else {
3518 		dc->debug = debug_defaults_diags;
3519 	}
3520 	//dcn2.0x
3521 	dc->work_arounds.dedcn20_305_wa = true;
3522 
3523 	// Init the vm_helper
3524 	if (dc->vm_helper)
3525 		vm_helper_init(dc->vm_helper, 16);
3526 
3527 	/*************************************************
3528 	 *  Create resources                             *
3529 	 *************************************************/
3530 
3531 	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
3532 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3533 				CLOCK_SOURCE_COMBO_PHY_PLL0,
3534 				&clk_src_regs[0], false);
3535 	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
3536 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3537 				CLOCK_SOURCE_COMBO_PHY_PLL1,
3538 				&clk_src_regs[1], false);
3539 	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
3540 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3541 				CLOCK_SOURCE_COMBO_PHY_PLL2,
3542 				&clk_src_regs[2], false);
3543 	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
3544 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3545 				CLOCK_SOURCE_COMBO_PHY_PLL3,
3546 				&clk_src_regs[3], false);
3547 	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
3548 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3549 				CLOCK_SOURCE_COMBO_PHY_PLL4,
3550 				&clk_src_regs[4], false);
3551 	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
3552 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3553 				CLOCK_SOURCE_COMBO_PHY_PLL5,
3554 				&clk_src_regs[5], false);
3555 	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
3556 	/* todo: not reuse phy_pll registers */
3557 	pool->base.dp_clock_source =
3558 			dcn20_clock_source_create(ctx, ctx->dc_bios,
3559 				CLOCK_SOURCE_ID_DP_DTO,
3560 				&clk_src_regs[0], true);
3561 
3562 	for (i = 0; i < pool->base.clk_src_count; i++) {
3563 		if (pool->base.clock_sources[i] == NULL) {
3564 			dm_error("DC: failed to create clock sources!\n");
3565 			BREAK_TO_DEBUGGER();
3566 			goto create_fail;
3567 		}
3568 	}
3569 
3570 	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
3571 	if (pool->base.dccg == NULL) {
3572 		dm_error("DC: failed to create dccg!\n");
3573 		BREAK_TO_DEBUGGER();
3574 		goto create_fail;
3575 	}
3576 
3577 	pool->base.dmcu = dcn20_dmcu_create(ctx,
3578 			&dmcu_regs,
3579 			&dmcu_shift,
3580 			&dmcu_mask);
3581 	if (pool->base.dmcu == NULL) {
3582 		dm_error("DC: failed to create dmcu!\n");
3583 		BREAK_TO_DEBUGGER();
3584 		goto create_fail;
3585 	}
3586 
3587 	pool->base.abm = dce_abm_create(ctx,
3588 			&abm_regs,
3589 			&abm_shift,
3590 			&abm_mask);
3591 	if (pool->base.abm == NULL) {
3592 		dm_error("DC: failed to create abm!\n");
3593 		BREAK_TO_DEBUGGER();
3594 		goto create_fail;
3595 	}
3596 
3597 	pool->base.pp_smu = dcn20_pp_smu_create(ctx);
3598 
3599 
3600 	if (!init_soc_bounding_box(dc, pool)) {
3601 		dm_error("DC: failed to initialize soc bounding box!\n");
3602 		BREAK_TO_DEBUGGER();
3603 		goto create_fail;
3604 	}
3605 
3606 	dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);
3607 
3608 	if (!dc->debug.disable_pplib_wm_range) {
3609 		struct pp_smu_wm_range_sets ranges = {0};
3610 		int i = 0;
3611 
3612 		ranges.num_reader_wm_sets = 0;
3613 
3614 		if (loaded_bb->num_states == 1) {
3615 			ranges.reader_wm_sets[0].wm_inst = i;
3616 			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3617 			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3618 			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3619 			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3620 
3621 			ranges.num_reader_wm_sets = 1;
3622 		} else if (loaded_bb->num_states > 1) {
3623 			for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
3624 				ranges.reader_wm_sets[i].wm_inst = i;
3625 				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3626 				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3627 				ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
3628 				ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
3629 
3630 				ranges.num_reader_wm_sets = i + 1;
3631 			}
3632 
3633 			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3634 			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3635 		}
3636 
3637 		ranges.num_writer_wm_sets = 1;
3638 
3639 		ranges.writer_wm_sets[0].wm_inst = 0;
3640 		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3641 		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3642 		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3643 		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3644 
3645 		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
3646 		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
3647 			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
3648 	}
3649 
3650 	init_data.ctx = dc->ctx;
3651 	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
3652 	if (!pool->base.irqs)
3653 		goto create_fail;
3654 
3655 	/* mem input -> ipp -> dpp -> opp -> TG */
3656 	for (i = 0; i < pool->base.pipe_count; i++) {
3657 		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
3658 		if (pool->base.hubps[i] == NULL) {
3659 			BREAK_TO_DEBUGGER();
3660 			dm_error(
3661 				"DC: failed to create memory input!\n");
3662 			goto create_fail;
3663 		}
3664 
3665 		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
3666 		if (pool->base.ipps[i] == NULL) {
3667 			BREAK_TO_DEBUGGER();
3668 			dm_error(
3669 				"DC: failed to create input pixel processor!\n");
3670 			goto create_fail;
3671 		}
3672 
3673 		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
3674 		if (pool->base.dpps[i] == NULL) {
3675 			BREAK_TO_DEBUGGER();
3676 			dm_error(
3677 				"DC: failed to create dpps!\n");
3678 			goto create_fail;
3679 		}
3680 	}
3681 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
3682 		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
3683 		if (pool->base.engines[i] == NULL) {
3684 			BREAK_TO_DEBUGGER();
3685 			dm_error(
3686 				"DC:failed to create aux engine!!\n");
3687 			goto create_fail;
3688 		}
3689 		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
3690 		if (pool->base.hw_i2cs[i] == NULL) {
3691 			BREAK_TO_DEBUGGER();
3692 			dm_error(
3693 				"DC:failed to create hw i2c!!\n");
3694 			goto create_fail;
3695 		}
3696 		pool->base.sw_i2cs[i] = NULL;
3697 	}
3698 
3699 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
3700 		pool->base.opps[i] = dcn20_opp_create(ctx, i);
3701 		if (pool->base.opps[i] == NULL) {
3702 			BREAK_TO_DEBUGGER();
3703 			dm_error(
3704 				"DC: failed to create output pixel processor!\n");
3705 			goto create_fail;
3706 		}
3707 	}
3708 
3709 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
3710 		pool->base.timing_generators[i] = dcn20_timing_generator_create(
3711 				ctx, i);
3712 		if (pool->base.timing_generators[i] == NULL) {
3713 			BREAK_TO_DEBUGGER();
3714 			dm_error("DC: failed to create tg!\n");
3715 			goto create_fail;
3716 		}
3717 	}
3718 
3719 	pool->base.timing_generator_count = i;
3720 
3721 	pool->base.mpc = dcn20_mpc_create(ctx);
3722 	if (pool->base.mpc == NULL) {
3723 		BREAK_TO_DEBUGGER();
3724 		dm_error("DC: failed to create mpc!\n");
3725 		goto create_fail;
3726 	}
3727 
3728 	pool->base.hubbub = dcn20_hubbub_create(ctx);
3729 	if (pool->base.hubbub == NULL) {
3730 		BREAK_TO_DEBUGGER();
3731 		dm_error("DC: failed to create hubbub!\n");
3732 		goto create_fail;
3733 	}
3734 
3735 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3736 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
3737 		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
3738 		if (pool->base.dscs[i] == NULL) {
3739 			BREAK_TO_DEBUGGER();
3740 			dm_error("DC: failed to create display stream compressor %d!\n", i);
3741 			goto create_fail;
3742 		}
3743 	}
3744 #endif
3745 
3746 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
3747 		BREAK_TO_DEBUGGER();
3748 		dm_error("DC: failed to create dwbc!\n");
3749 		goto create_fail;
3750 	}
3751 	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
3752 		BREAK_TO_DEBUGGER();
3753 		dm_error("DC: failed to create mcif_wb!\n");
3754 		goto create_fail;
3755 	}
3756 
3757 	if (!resource_construct(num_virtual_links, dc, &pool->base,
3758 			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
3759 			&res_create_funcs : &res_create_maximus_funcs)))
3760 			goto create_fail;
3761 
3762 	dcn20_hw_sequencer_construct(dc);
3763 
3764 	dc->caps.max_planes =  pool->base.pipe_count;
3765 
3766 	for (i = 0; i < dc->caps.max_planes; ++i)
3767 		dc->caps.planes[i] = plane_cap;
3768 
3769 	dc->cap_funcs = cap_funcs;
3770 
3771 	return true;
3772 
3773 create_fail:
3774 
3775 	destruct(pool);
3776 
3777 	return false;
3778 }
3779 
3780 struct resource_pool *dcn20_create_resource_pool(
3781 		const struct dc_init_data *init_data,
3782 		struct dc *dc)
3783 {
3784 	struct dcn20_resource_pool *pool =
3785 		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
3786 
3787 	if (!pool)
3788 		return NULL;
3789 
3790 	if (construct(init_data->num_virtual_links, dc, pool))
3791 		return &pool->base;
3792 
3793 	BREAK_TO_DEBUGGER();
3794 	kfree(pool);
3795 	return NULL;
3796 }
3797