1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/slab.h>
27 
28 #include "dm_services.h"
29 #include "dc.h"
30 
31 #include "resource.h"
32 #include "include/irq_service_interface.h"
33 #include "dcn20/dcn20_resource.h"
34 
35 #include "dcn10/dcn10_hubp.h"
36 #include "dcn10/dcn10_ipp.h"
37 #include "dcn20_hubbub.h"
38 #include "dcn20_mpc.h"
39 #include "dcn20_hubp.h"
40 #include "irq/dcn20/irq_service_dcn20.h"
41 #include "dcn20_dpp.h"
42 #include "dcn20_optc.h"
43 #include "dcn20_hwseq.h"
44 #include "dce110/dce110_hw_sequencer.h"
45 #include "dcn10/dcn10_resource.h"
46 #include "dcn20_opp.h"
47 
48 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
49 #include "dcn20_dsc.h"
50 #endif
51 
52 #include "dcn20_link_encoder.h"
53 #include "dcn20_stream_encoder.h"
54 #include "dce/dce_clock_source.h"
55 #include "dce/dce_audio.h"
56 #include "dce/dce_hwseq.h"
57 #include "virtual/virtual_stream_encoder.h"
58 #include "dce110/dce110_resource.h"
59 #include "dml/display_mode_vba.h"
60 #include "dcn20_dccg.h"
61 #include "dcn20_vmid.h"
62 
63 #include "navi10_ip_offset.h"
64 
65 #include "dcn/dcn_2_0_0_offset.h"
66 #include "dcn/dcn_2_0_0_sh_mask.h"
67 
68 #include "nbio/nbio_2_3_offset.h"
69 
70 #include "dcn20/dcn20_dwb.h"
71 #include "dcn20/dcn20_mmhubbub.h"
72 
73 #include "mmhub/mmhub_2_0_0_offset.h"
74 #include "mmhub/mmhub_2_0_0_sh_mask.h"
75 
76 #include "reg_helper.h"
77 #include "dce/dce_abm.h"
78 #include "dce/dce_dmcu.h"
79 #include "dce/dce_aux.h"
80 #include "dce/dce_i2c.h"
81 #include "vm_helper.h"
82 
83 #include "amdgpu_socbb.h"
84 
85 #define SOC_BOUNDING_BOX_VALID false
86 #define DC_LOGGER_INIT(logger)
87 
88 struct _vcs_dpi_ip_params_st dcn2_0_ip = {
89 	.odm_capable = 1,
90 	.gpuvm_enable = 0,
91 	.hostvm_enable = 0,
92 	.gpuvm_max_page_table_levels = 4,
93 	.hostvm_max_page_table_levels = 4,
94 	.hostvm_cached_page_table_levels = 0,
95 	.pte_group_size_bytes = 2048,
96 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
97 	.num_dsc = 6,
98 #else
99 	.num_dsc = 0,
100 #endif
101 	.rob_buffer_size_kbytes = 168,
102 	.det_buffer_size_kbytes = 164,
103 	.dpte_buffer_size_in_pte_reqs_luma = 84,
104 	.pde_proc_buffer_size_64k_reqs = 48,
105 	.dpp_output_buffer_pixels = 2560,
106 	.opp_output_buffer_lines = 1,
107 	.pixel_chunk_size_kbytes = 8,
108 	.pte_chunk_size_kbytes = 2,
109 	.meta_chunk_size_kbytes = 2,
110 	.writeback_chunk_size_kbytes = 2,
111 	.line_buffer_size_bits = 789504,
112 	.is_line_buffer_bpp_fixed = 0,
113 	.line_buffer_fixed_bpp = 0,
114 	.dcc_supported = true,
115 	.max_line_buffer_lines = 12,
116 	.writeback_luma_buffer_size_kbytes = 12,
117 	.writeback_chroma_buffer_size_kbytes = 8,
118 	.writeback_chroma_line_buffer_width_pixels = 4,
119 	.writeback_max_hscl_ratio = 1,
120 	.writeback_max_vscl_ratio = 1,
121 	.writeback_min_hscl_ratio = 1,
122 	.writeback_min_vscl_ratio = 1,
123 	.writeback_max_hscl_taps = 12,
124 	.writeback_max_vscl_taps = 12,
125 	.writeback_line_buffer_luma_buffer_size = 0,
126 	.writeback_line_buffer_chroma_buffer_size = 14643,
127 	.cursor_buffer_size = 8,
128 	.cursor_chunk_size = 2,
129 	.max_num_otg = 6,
130 	.max_num_dpp = 6,
131 	.max_num_wb = 1,
132 	.max_dchub_pscl_bw_pix_per_clk = 4,
133 	.max_pscl_lb_bw_pix_per_clk = 2,
134 	.max_lb_vscl_bw_pix_per_clk = 4,
135 	.max_vscl_hscl_bw_pix_per_clk = 4,
136 	.max_hscl_ratio = 8,
137 	.max_vscl_ratio = 8,
138 	.hscl_mults = 4,
139 	.vscl_mults = 4,
140 	.max_hscl_taps = 8,
141 	.max_vscl_taps = 8,
142 	.dispclk_ramp_margin_percent = 1,
143 	.underscan_factor = 1.10,
	.min_vblank_lines = 32,
	.dppclk_delay_subtotal = 77,
146 	.dppclk_delay_scl_lb_only = 16,
147 	.dppclk_delay_scl = 50,
148 	.dppclk_delay_cnvc_formatter = 8,
149 	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 87,
151 	.dcfclk_cstate_latency = 10, // SRExitTime
152 	.max_inter_dcn_tile_repeaters = 8,
153 
154 	.xfc_supported = true,
155 	.xfc_fill_bw_overhead_percent = 10.0,
156 	.xfc_fill_constant_bytes = 0,
157 };
158 
159 struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 };
160 
161 
162 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
163 	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
164 	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
165 	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
166 	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
167 	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
168 	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
169 	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
170 	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
171 	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
172 	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
173 	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
174 	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
175 	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
176 	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
177 #endif
178 
179 
180 enum dcn20_clk_src_array_id {
181 	DCN20_CLK_SRC_PLL0,
182 	DCN20_CLK_SRC_PLL1,
183 	DCN20_CLK_SRC_PLL2,
184 	DCN20_CLK_SRC_PLL3,
185 	DCN20_CLK_SRC_PLL4,
186 	DCN20_CLK_SRC_PLL5,
187 	DCN20_CLK_SRC_TOTAL
188 };
189 
/* begin *********************
 * macros to expand register list macros defined in HW object header files */
192 
193 /* DCN */
194 /* TODO awful hack. fixup dcn20_dwb.h */
195 #undef BASE_INNER
196 #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
197 
198 #define BASE(seg) BASE_INNER(seg)
199 
200 #define SR(reg_name)\
201 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
202 					mm ## reg_name
203 
204 #define SRI(reg_name, block, id)\
205 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
206 					mm ## block ## id ## _ ## reg_name
207 
208 #define SRIR(var_name, reg_name, block, id)\
209 	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
210 					mm ## block ## id ## _ ## reg_name
211 
212 #define SRII(reg_name, block, id)\
213 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
214 					mm ## block ## id ## _ ## reg_name
215 
216 #define DCCG_SRII(reg_name, block, id)\
217 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
218 					mm ## block ## id ## _ ## reg_name
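
/*
 * Example expansion (illustrative, using the defines above):
 *   SRI(DP_DPHY_INTERNAL_CTRL, DP, 0)
 * expands to
 *   .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX)
 *                            + mmDP0_DP_DPHY_INTERNAL_CTRL
 * i.e. the register's segment base (DCN_BASE__INST0_SEG2) plus its offset
 * (0x210f), so each register list entry holds a fully rebased address.
 */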
219 
220 /* NBIO */
221 #define NBIO_BASE_INNER(seg) \
222 	NBIO_BASE__INST0_SEG ## seg
223 
224 #define NBIO_BASE(seg) \
225 	NBIO_BASE_INNER(seg)
226 
227 #define NBIO_SR(reg_name)\
228 		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
229 					mm ## reg_name
230 
231 /* MMHUB */
232 #define MMHUB_BASE_INNER(seg) \
233 	MMHUB_BASE__INST0_SEG ## seg
234 
235 #define MMHUB_BASE(seg) \
236 	MMHUB_BASE_INNER(seg)
237 
238 #define MMHUB_SR(reg_name)\
239 		.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
240 					mmMM ## reg_name
241 
242 static const struct bios_registers bios_regs = {
243 		NBIO_SR(BIOS_SCRATCH_3),
244 		NBIO_SR(BIOS_SCRATCH_6)
245 };
246 
247 #define clk_src_regs(index, pllid)\
248 [index] = {\
249 	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
250 }
251 
252 static const struct dce110_clk_src_regs clk_src_regs[] = {
253 	clk_src_regs(0, A),
254 	clk_src_regs(1, B),
255 	clk_src_regs(2, C),
256 	clk_src_regs(3, D),
257 	clk_src_regs(4, E),
258 	clk_src_regs(5, F)
259 };
260 
261 static const struct dce110_clk_src_shift cs_shift = {
262 		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
263 };
264 
265 static const struct dce110_clk_src_mask cs_mask = {
266 		CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
267 };
268 
269 static const struct dce_dmcu_registers dmcu_regs = {
270 		DMCU_DCN10_REG_LIST()
271 };
272 
273 static const struct dce_dmcu_shift dmcu_shift = {
274 		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
275 };
276 
277 static const struct dce_dmcu_mask dmcu_mask = {
278 		DMCU_MASK_SH_LIST_DCN10(_MASK)
279 };
280 
281 static const struct dce_abm_registers abm_regs = {
282 		ABM_DCN20_REG_LIST()
283 };
284 
285 static const struct dce_abm_shift abm_shift = {
286 		ABM_MASK_SH_LIST_DCN20(__SHIFT)
287 };
288 
289 static const struct dce_abm_mask abm_mask = {
290 		ABM_MASK_SH_LIST_DCN20(_MASK)
291 };
292 
293 #define audio_regs(id)\
294 [id] = {\
295 		AUD_COMMON_REG_LIST(id)\
296 }
297 
298 static const struct dce_audio_registers audio_regs[] = {
299 	audio_regs(0),
300 	audio_regs(1),
301 	audio_regs(2),
302 	audio_regs(3),
303 	audio_regs(4),
304 	audio_regs(5),
305 	audio_regs(6),
306 };
307 
308 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
309 		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
310 		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
311 		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
312 
313 static const struct dce_audio_shift audio_shift = {
314 		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
315 };
316 
317 static const struct dce_aduio_mask audio_mask = {
318 		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
319 };
320 
321 #define stream_enc_regs(id)\
322 [id] = {\
323 	SE_DCN2_REG_LIST(id)\
324 }
325 
326 static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
327 	stream_enc_regs(0),
328 	stream_enc_regs(1),
329 	stream_enc_regs(2),
330 	stream_enc_regs(3),
331 	stream_enc_regs(4),
332 	stream_enc_regs(5),
333 };
334 
335 static const struct dcn10_stream_encoder_shift se_shift = {
336 		SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
337 };
338 
339 static const struct dcn10_stream_encoder_mask se_mask = {
340 		SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
341 };
342 
343 
344 #define aux_regs(id)\
345 [id] = {\
346 	DCN2_AUX_REG_LIST(id)\
347 }
348 
349 static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
350 		aux_regs(0),
351 		aux_regs(1),
352 		aux_regs(2),
353 		aux_regs(3),
354 		aux_regs(4),
355 		aux_regs(5)
356 };
357 
358 #define hpd_regs(id)\
359 [id] = {\
360 	HPD_REG_LIST(id)\
361 }
362 
363 static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
364 		hpd_regs(0),
365 		hpd_regs(1),
366 		hpd_regs(2),
367 		hpd_regs(3),
368 		hpd_regs(4),
369 		hpd_regs(5)
370 };
371 
372 #define link_regs(id, phyid)\
373 [id] = {\
374 	LE_DCN10_REG_LIST(id), \
375 	UNIPHY_DCN2_REG_LIST(phyid), \
376 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
377 }
378 
379 static const struct dcn10_link_enc_registers link_enc_regs[] = {
380 	link_regs(0, A),
381 	link_regs(1, B),
382 	link_regs(2, C),
383 	link_regs(3, D),
384 	link_regs(4, E),
385 	link_regs(5, F)
386 };
387 
388 static const struct dcn10_link_enc_shift le_shift = {
389 	LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT)
390 };
391 
392 static const struct dcn10_link_enc_mask le_mask = {
393 	LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK)
394 };
395 
396 #define ipp_regs(id)\
397 [id] = {\
398 	IPP_REG_LIST_DCN20(id),\
399 }
400 
401 static const struct dcn10_ipp_registers ipp_regs[] = {
402 	ipp_regs(0),
403 	ipp_regs(1),
404 	ipp_regs(2),
405 	ipp_regs(3),
406 	ipp_regs(4),
407 	ipp_regs(5),
408 };
409 
410 static const struct dcn10_ipp_shift ipp_shift = {
411 		IPP_MASK_SH_LIST_DCN20(__SHIFT)
412 };
413 
414 static const struct dcn10_ipp_mask ipp_mask = {
415 		IPP_MASK_SH_LIST_DCN20(_MASK),
416 };
417 
418 #define opp_regs(id)\
419 [id] = {\
420 	OPP_REG_LIST_DCN20(id),\
421 }
422 
423 static const struct dcn20_opp_registers opp_regs[] = {
424 	opp_regs(0),
425 	opp_regs(1),
426 	opp_regs(2),
427 	opp_regs(3),
428 	opp_regs(4),
429 	opp_regs(5),
430 };
431 
432 static const struct dcn20_opp_shift opp_shift = {
433 		OPP_MASK_SH_LIST_DCN20(__SHIFT)
434 };
435 
436 static const struct dcn20_opp_mask opp_mask = {
437 		OPP_MASK_SH_LIST_DCN20(_MASK)
438 };
439 
440 #define aux_engine_regs(id)\
441 [id] = {\
442 	AUX_COMMON_REG_LIST0(id), \
443 	.AUXN_IMPCAL = 0, \
444 	.AUXP_IMPCAL = 0, \
445 	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
446 }
447 
448 static const struct dce110_aux_registers aux_engine_regs[] = {
449 		aux_engine_regs(0),
450 		aux_engine_regs(1),
451 		aux_engine_regs(2),
452 		aux_engine_regs(3),
453 		aux_engine_regs(4),
454 		aux_engine_regs(5)
455 };
456 
457 #define tf_regs(id)\
458 [id] = {\
459 	TF_REG_LIST_DCN20(id),\
460 }
461 
462 static const struct dcn2_dpp_registers tf_regs[] = {
463 	tf_regs(0),
464 	tf_regs(1),
465 	tf_regs(2),
466 	tf_regs(3),
467 	tf_regs(4),
468 	tf_regs(5),
469 };
470 
471 static const struct dcn2_dpp_shift tf_shift = {
472 		TF_REG_LIST_SH_MASK_DCN20(__SHIFT)
473 };
474 
475 static const struct dcn2_dpp_mask tf_mask = {
476 		TF_REG_LIST_SH_MASK_DCN20(_MASK)
477 };
478 
479 #define dwbc_regs_dcn2(id)\
480 [id] = {\
481 	DWBC_COMMON_REG_LIST_DCN2_0(id),\
}
483 
484 static const struct dcn20_dwbc_registers dwbc20_regs[] = {
485 	dwbc_regs_dcn2(0),
486 };
487 
488 static const struct dcn20_dwbc_shift dwbc20_shift = {
489 	DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
490 };
491 
492 static const struct dcn20_dwbc_mask dwbc20_mask = {
493 	DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
494 };
495 
496 #define mcif_wb_regs_dcn2(id)\
497 [id] = {\
498 	MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
}
500 
501 static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
502 	mcif_wb_regs_dcn2(0),
503 };
504 
505 static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
506 	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
507 };
508 
509 static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
510 	MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
511 };
512 
513 static const struct dcn20_mpc_registers mpc_regs = {
514 		MPC_REG_LIST_DCN2_0(0),
515 		MPC_REG_LIST_DCN2_0(1),
516 		MPC_REG_LIST_DCN2_0(2),
517 		MPC_REG_LIST_DCN2_0(3),
518 		MPC_REG_LIST_DCN2_0(4),
519 		MPC_REG_LIST_DCN2_0(5),
520 		MPC_OUT_MUX_REG_LIST_DCN2_0(0),
521 		MPC_OUT_MUX_REG_LIST_DCN2_0(1),
522 		MPC_OUT_MUX_REG_LIST_DCN2_0(2),
523 		MPC_OUT_MUX_REG_LIST_DCN2_0(3),
524 		MPC_OUT_MUX_REG_LIST_DCN2_0(4),
525 		MPC_OUT_MUX_REG_LIST_DCN2_0(5),
526 };
527 
528 static const struct dcn20_mpc_shift mpc_shift = {
529 	MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
530 };
531 
532 static const struct dcn20_mpc_mask mpc_mask = {
533 	MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
534 };
535 
536 #define tg_regs(id)\
537 [id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
538 
539 
540 static const struct dcn_optc_registers tg_regs[] = {
541 	tg_regs(0),
542 	tg_regs(1),
543 	tg_regs(2),
544 	tg_regs(3),
545 	tg_regs(4),
546 	tg_regs(5)
547 };
548 
549 static const struct dcn_optc_shift tg_shift = {
550 	TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
551 };
552 
553 static const struct dcn_optc_mask tg_mask = {
554 	TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
555 };
556 
557 #define hubp_regs(id)\
558 [id] = {\
559 	HUBP_REG_LIST_DCN20(id)\
560 }
561 
562 static const struct dcn_hubp2_registers hubp_regs[] = {
563 		hubp_regs(0),
564 		hubp_regs(1),
565 		hubp_regs(2),
566 		hubp_regs(3),
567 		hubp_regs(4),
568 		hubp_regs(5)
569 };
570 
571 static const struct dcn_hubp2_shift hubp_shift = {
572 		HUBP_MASK_SH_LIST_DCN20(__SHIFT)
573 };
574 
575 static const struct dcn_hubp2_mask hubp_mask = {
576 		HUBP_MASK_SH_LIST_DCN20(_MASK)
577 };
578 
579 static const struct dcn_hubbub_registers hubbub_reg = {
580 		HUBBUB_REG_LIST_DCN20(0)
581 };
582 
583 static const struct dcn_hubbub_shift hubbub_shift = {
584 		HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
585 };
586 
587 static const struct dcn_hubbub_mask hubbub_mask = {
588 		HUBBUB_MASK_SH_LIST_DCN20(_MASK)
589 };
590 
591 #define vmid_regs(id)\
592 [id] = {\
593 		DCN20_VMID_REG_LIST(id)\
594 }
595 
596 static const struct dcn_vmid_registers vmid_regs[] = {
597 	vmid_regs(0),
598 	vmid_regs(1),
599 	vmid_regs(2),
600 	vmid_regs(3),
601 	vmid_regs(4),
602 	vmid_regs(5),
603 	vmid_regs(6),
604 	vmid_regs(7),
605 	vmid_regs(8),
606 	vmid_regs(9),
607 	vmid_regs(10),
608 	vmid_regs(11),
609 	vmid_regs(12),
610 	vmid_regs(13),
611 	vmid_regs(14),
612 	vmid_regs(15)
613 };
614 
615 static const struct dcn20_vmid_shift vmid_shifts = {
616 		DCN20_VMID_MASK_SH_LIST(__SHIFT)
617 };
618 
619 static const struct dcn20_vmid_mask vmid_masks = {
620 		DCN20_VMID_MASK_SH_LIST(_MASK)
621 };
622 
623 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
624 #define dsc_regsDCN20(id)\
625 [id] = {\
626 	DSC_REG_LIST_DCN20(id)\
627 }
628 
629 static const struct dcn20_dsc_registers dsc_regs[] = {
630 	dsc_regsDCN20(0),
631 	dsc_regsDCN20(1),
632 	dsc_regsDCN20(2),
633 	dsc_regsDCN20(3),
634 	dsc_regsDCN20(4),
635 	dsc_regsDCN20(5)
636 };
637 
638 static const struct dcn20_dsc_shift dsc_shift = {
639 	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
640 };
641 
642 static const struct dcn20_dsc_mask dsc_mask = {
643 	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
644 };
645 #endif
646 
647 static const struct dccg_registers dccg_regs = {
648 		DCCG_REG_LIST_DCN2()
649 };
650 
651 static const struct dccg_shift dccg_shift = {
652 		DCCG_MASK_SH_LIST_DCN2(__SHIFT)
653 };
654 
655 static const struct dccg_mask dccg_mask = {
656 		DCCG_MASK_SH_LIST_DCN2(_MASK)
657 };
658 
659 static const struct resource_caps res_cap_nv10 = {
660 		.num_timing_generator = 6,
661 		.num_opp = 6,
662 		.num_video_plane = 6,
663 		.num_audio = 7,
664 		.num_stream_encoder = 6,
665 		.num_pll = 6,
666 		.num_dwb = 1,
667 		.num_ddc = 6,
668 		.num_vmid = 16,
669 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
670 		.num_dsc = 6,
671 #endif
672 };
673 
674 static const struct dc_plane_cap plane_cap = {
675 	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
676 	.blends_with_above = true,
677 	.blends_with_below = true,
678 	.per_pixel_alpha = true,
679 
680 	.pixel_format_support = {
681 			.argb8888 = true,
682 			.nv12 = true,
683 			.fp16 = true
684 	},
685 
686 	.max_upscale_factor = {
687 			.argb8888 = 16000,
688 			.nv12 = 16000,
689 			.fp16 = 1
690 	},
691 
692 	.max_downscale_factor = {
693 			.argb8888 = 250,
694 			.nv12 = 250,
695 			.fp16 = 1
696 	}
697 };

static const struct resource_caps res_cap_nv14 = {
699 		.num_timing_generator = 5,
700 		.num_opp = 5,
701 		.num_video_plane = 5,
702 		.num_audio = 6,
703 		.num_stream_encoder = 5,
704 		.num_pll = 5,
705 		.num_dwb = 0,
706 		.num_ddc = 5,
707 };
708 
709 static const struct dc_debug_options debug_defaults_drv = {
710 		.disable_dmcu = true,
711 		.force_abm_enable = false,
712 		.timing_trace = false,
713 		.clock_trace = true,
714 		.disable_pplib_clock_request = true,
715 		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
716 		.force_single_disp_pipe_split = true,
717 		.disable_dcc = DCC_ENABLE,
718 		.vsr_support = true,
719 		.performance_trace = false,
		.max_downscale_src_width = 5120, /* up to 5K */
721 		.disable_pplib_wm_range = false,
722 		.scl_reset_length10 = true,
723 		.sanity_checks = false,
724 		.disable_tri_buf = true,
725 		.underflow_assert_delay_us = 0xFFFFFFFF,
726 };
727 
728 static const struct dc_debug_options debug_defaults_diags = {
729 		.disable_dmcu = true,
730 		.force_abm_enable = false,
731 		.timing_trace = true,
732 		.clock_trace = true,
733 		.disable_dpp_power_gate = true,
734 		.disable_hubp_power_gate = true,
735 		.disable_clock_gate = true,
736 		.disable_pplib_clock_request = true,
737 		.disable_pplib_wm_range = true,
738 		.disable_stutter = true,
739 		.scl_reset_length10 = true,
740 		.underflow_assert_delay_us = 0xFFFFFFFF,
741 };
742 
743 void dcn20_dpp_destroy(struct dpp **dpp)
744 {
745 	kfree(TO_DCN20_DPP(*dpp));
746 	*dpp = NULL;
747 }
748 
749 struct dpp *dcn20_dpp_create(
750 	struct dc_context *ctx,
751 	uint32_t inst)
752 {
753 	struct dcn20_dpp *dpp =
754 		kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
755 
756 	if (!dpp)
757 		return NULL;
758 
759 	if (dpp2_construct(dpp, ctx, inst,
760 			&tf_regs[inst], &tf_shift, &tf_mask))
761 		return &dpp->base;
762 
763 	BREAK_TO_DEBUGGER();
764 	kfree(dpp);
765 	return NULL;
766 }
767 
768 struct input_pixel_processor *dcn20_ipp_create(
769 	struct dc_context *ctx, uint32_t inst)
770 {
771 	struct dcn10_ipp *ipp =
772 		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
773 
774 	if (!ipp) {
775 		BREAK_TO_DEBUGGER();
776 		return NULL;
777 	}
778 
779 	dcn20_ipp_construct(ipp, ctx, inst,
780 			&ipp_regs[inst], &ipp_shift, &ipp_mask);
781 	return &ipp->base;
782 }
783 
784 
785 struct output_pixel_processor *dcn20_opp_create(
786 	struct dc_context *ctx, uint32_t inst)
787 {
788 	struct dcn20_opp *opp =
789 		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
790 
791 	if (!opp) {
792 		BREAK_TO_DEBUGGER();
793 		return NULL;
794 	}
795 
796 	dcn20_opp_construct(opp, ctx, inst,
797 			&opp_regs[inst], &opp_shift, &opp_mask);
798 	return &opp->base;
799 }
800 
801 struct dce_aux *dcn20_aux_engine_create(
802 	struct dc_context *ctx,
803 	uint32_t inst)
804 {
805 	struct aux_engine_dce110 *aux_engine =
806 		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
807 
808 	if (!aux_engine)
809 		return NULL;
810 
811 	dce110_aux_engine_construct(aux_engine, ctx, inst,
812 				    SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
813 				    &aux_engine_regs[inst]);
814 
815 	return &aux_engine->base;
816 }

#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
818 
819 static const struct dce_i2c_registers i2c_hw_regs[] = {
820 		i2c_inst_regs(1),
821 		i2c_inst_regs(2),
822 		i2c_inst_regs(3),
823 		i2c_inst_regs(4),
824 		i2c_inst_regs(5),
825 		i2c_inst_regs(6),
826 };
827 
828 static const struct dce_i2c_shift i2c_shifts = {
829 		I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
830 };
831 
832 static const struct dce_i2c_mask i2c_masks = {
833 		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
834 };
835 
836 struct dce_i2c_hw *dcn20_i2c_hw_create(
837 	struct dc_context *ctx,
838 	uint32_t inst)
839 {
840 	struct dce_i2c_hw *dce_i2c_hw =
841 		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
842 
843 	if (!dce_i2c_hw)
844 		return NULL;
845 
846 	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
847 				    &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
848 
849 	return dce_i2c_hw;
850 }

struct mpc *dcn20_mpc_create(struct dc_context *ctx)
852 {
853 	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
854 					  GFP_KERNEL);
855 
856 	if (!mpc20)
857 		return NULL;
858 
859 	dcn20_mpc_construct(mpc20, ctx,
860 			&mpc_regs,
861 			&mpc_shift,
862 			&mpc_mask,
863 			6);
864 
865 	return &mpc20->base;
866 }
867 
868 struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
869 {
870 	int i;
871 	struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
872 					  GFP_KERNEL);
873 
874 	if (!hubbub)
875 		return NULL;
876 
877 	hubbub2_construct(hubbub, ctx,
878 			&hubbub_reg,
879 			&hubbub_shift,
880 			&hubbub_mask);
881 
882 	for (i = 0; i < res_cap_nv10.num_vmid; i++) {
883 		struct dcn20_vmid *vmid = &hubbub->vmid[i];
884 
885 		vmid->ctx = ctx;
886 
887 		vmid->regs = &vmid_regs[i];
888 		vmid->shifts = &vmid_shifts;
889 		vmid->masks = &vmid_masks;
890 	}
891 
892 	return &hubbub->base;
893 }
894 
895 struct timing_generator *dcn20_timing_generator_create(
896 		struct dc_context *ctx,
897 		uint32_t instance)
898 {
899 	struct optc *tgn10 =
900 		kzalloc(sizeof(struct optc), GFP_KERNEL);
901 
902 	if (!tgn10)
903 		return NULL;
904 
905 	tgn10->base.inst = instance;
906 	tgn10->base.ctx = ctx;
907 
908 	tgn10->tg_regs = &tg_regs[instance];
909 	tgn10->tg_shift = &tg_shift;
910 	tgn10->tg_mask = &tg_mask;
911 
912 	dcn20_timing_generator_init(tgn10);
913 
914 	return &tgn10->base;
915 }
916 
917 static const struct encoder_feature_support link_enc_feature = {
918 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
919 		.max_hdmi_pixel_clock = 600000,
920 		.hdmi_ycbcr420_supported = true,
921 		.dp_ycbcr420_supported = true,
922 		.flags.bits.IS_HBR2_CAPABLE = true,
923 		.flags.bits.IS_HBR3_CAPABLE = true,
924 		.flags.bits.IS_TPS3_CAPABLE = true,
925 		.flags.bits.IS_TPS4_CAPABLE = true
926 };
927 
928 struct link_encoder *dcn20_link_encoder_create(
929 	const struct encoder_init_data *enc_init_data)
930 {
931 	struct dcn20_link_encoder *enc20 =
932 		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
933 
934 	if (!enc20)
935 		return NULL;
936 
937 	dcn20_link_encoder_construct(enc20,
938 				      enc_init_data,
939 				      &link_enc_feature,
940 				      &link_enc_regs[enc_init_data->transmitter],
941 				      &link_enc_aux_regs[enc_init_data->channel - 1],
942 				      &link_enc_hpd_regs[enc_init_data->hpd_source],
943 				      &le_shift,
944 				      &le_mask);
945 
946 	return &enc20->enc10.base;
947 }
948 
949 struct clock_source *dcn20_clock_source_create(
950 	struct dc_context *ctx,
951 	struct dc_bios *bios,
952 	enum clock_source_id id,
953 	const struct dce110_clk_src_regs *regs,
954 	bool dp_clk_src)
955 {
956 	struct dce110_clk_src *clk_src =
957 		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
958 
959 	if (!clk_src)
960 		return NULL;
961 
962 	if (dcn20_clk_src_construct(clk_src, ctx, bios, id,
963 			regs, &cs_shift, &cs_mask)) {
964 		clk_src->base.dp_clk_src = dp_clk_src;
965 		return &clk_src->base;
966 	}
967 
968 	BREAK_TO_DEBUGGER();
969 	return NULL;
970 }
971 
972 static void read_dce_straps(
973 	struct dc_context *ctx,
974 	struct resource_straps *straps)
975 {
976 	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
977 		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
978 }
979 
980 static struct audio *dcn20_create_audio(
981 		struct dc_context *ctx, unsigned int inst)
982 {
983 	return dce_audio_create(ctx, inst,
984 			&audio_regs[inst], &audio_shift, &audio_mask);
985 }
986 
987 struct stream_encoder *dcn20_stream_encoder_create(
988 	enum engine_id eng_id,
989 	struct dc_context *ctx)
990 {
991 	struct dcn10_stream_encoder *enc1 =
992 		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
993 
994 	if (!enc1)
995 		return NULL;
996 
997 	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
998 					&stream_enc_regs[eng_id],
999 					&se_shift, &se_mask);
1000 
1001 	return &enc1->base;
1002 }
1003 
1004 static const struct dce_hwseq_registers hwseq_reg = {
1005 		HWSEQ_DCN2_REG_LIST()
1006 };
1007 
1008 static const struct dce_hwseq_shift hwseq_shift = {
1009 		HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
1010 };
1011 
1012 static const struct dce_hwseq_mask hwseq_mask = {
1013 		HWSEQ_DCN2_MASK_SH_LIST(_MASK)
1014 };
1015 
1016 struct dce_hwseq *dcn20_hwseq_create(
1017 	struct dc_context *ctx)
1018 {
1019 	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
1020 
1021 	if (hws) {
1022 		hws->ctx = ctx;
1023 		hws->regs = &hwseq_reg;
1024 		hws->shifts = &hwseq_shift;
1025 		hws->masks = &hwseq_mask;
1026 	}
1027 	return hws;
1028 }
1029 
1030 static const struct resource_create_funcs res_create_funcs = {
1031 	.read_dce_straps = read_dce_straps,
1032 	.create_audio = dcn20_create_audio,
1033 	.create_stream_encoder = dcn20_stream_encoder_create,
1034 	.create_hwseq = dcn20_hwseq_create,
1035 };
1036 
1037 static const struct resource_create_funcs res_create_maximus_funcs = {
1038 	.read_dce_straps = NULL,
1039 	.create_audio = NULL,
1040 	.create_stream_encoder = NULL,
1041 	.create_hwseq = dcn20_hwseq_create,
1042 };
1043 
1044 void dcn20_clock_source_destroy(struct clock_source **clk_src)
1045 {
1046 	kfree(TO_DCE110_CLK_SRC(*clk_src));
1047 	*clk_src = NULL;
1048 }
1049 
1050 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1051 
1052 struct display_stream_compressor *dcn20_dsc_create(
1053 	struct dc_context *ctx, uint32_t inst)
1054 {
1055 	struct dcn20_dsc *dsc =
1056 		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
1057 
1058 	if (!dsc) {
1059 		BREAK_TO_DEBUGGER();
1060 		return NULL;
1061 	}
1062 
1063 	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
1064 	return &dsc->base;
1065 }
1066 
1067 void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
1068 {
1069 	kfree(container_of(*dsc, struct dcn20_dsc, base));
1070 	*dsc = NULL;
1071 }
1072 
1073 #endif
1074 
1075 static void destruct(struct dcn20_resource_pool *pool)
1076 {
1077 	unsigned int i;
1078 
1079 	for (i = 0; i < pool->base.stream_enc_count; i++) {
1080 		if (pool->base.stream_enc[i] != NULL) {
1081 			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
1082 			pool->base.stream_enc[i] = NULL;
1083 		}
1084 	}
1085 
1086 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1087 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
1088 		if (pool->base.dscs[i] != NULL)
1089 			dcn20_dsc_destroy(&pool->base.dscs[i]);
1090 	}
1091 #endif
1092 
1093 	if (pool->base.mpc != NULL) {
1094 		kfree(TO_DCN20_MPC(pool->base.mpc));
1095 		pool->base.mpc = NULL;
1096 	}
1097 	if (pool->base.hubbub != NULL) {
1098 		kfree(pool->base.hubbub);
1099 		pool->base.hubbub = NULL;
1100 	}
1101 	for (i = 0; i < pool->base.pipe_count; i++) {
1102 		if (pool->base.dpps[i] != NULL)
1103 			dcn20_dpp_destroy(&pool->base.dpps[i]);
1104 
1105 		if (pool->base.ipps[i] != NULL)
1106 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
1107 
1108 		if (pool->base.hubps[i] != NULL) {
1109 			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
1110 			pool->base.hubps[i] = NULL;
1111 		}
	}

	if (pool->base.irqs != NULL)
		dal_irq_service_destroy(&pool->base.irqs);
1117 
1118 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
1119 		if (pool->base.engines[i] != NULL)
1120 			dce110_engine_destroy(&pool->base.engines[i]);
1121 		if (pool->base.hw_i2cs[i] != NULL) {
1122 			kfree(pool->base.hw_i2cs[i]);
1123 			pool->base.hw_i2cs[i] = NULL;
1124 		}
1125 		if (pool->base.sw_i2cs[i] != NULL) {
1126 			kfree(pool->base.sw_i2cs[i]);
1127 			pool->base.sw_i2cs[i] = NULL;
1128 		}
1129 	}
1130 
1131 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
1132 		if (pool->base.opps[i] != NULL)
1133 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
1134 	}
1135 
1136 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
1137 		if (pool->base.timing_generators[i] != NULL)	{
1138 			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
1139 			pool->base.timing_generators[i] = NULL;
1140 		}
1141 	}
1142 
1143 	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
1144 		if (pool->base.dwbc[i] != NULL) {
1145 			kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
1146 			pool->base.dwbc[i] = NULL;
1147 		}
1148 		if (pool->base.mcif_wb[i] != NULL) {
1149 			kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
1150 			pool->base.mcif_wb[i] = NULL;
1151 		}
1152 	}
1153 
1154 	for (i = 0; i < pool->base.audio_count; i++) {
1155 		if (pool->base.audios[i])
1156 			dce_aud_destroy(&pool->base.audios[i]);
1157 	}
1158 
1159 	for (i = 0; i < pool->base.clk_src_count; i++) {
1160 		if (pool->base.clock_sources[i] != NULL) {
1161 			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
1162 			pool->base.clock_sources[i] = NULL;
1163 		}
1164 	}
1165 
1166 	if (pool->base.dp_clock_source != NULL) {
1167 		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
1168 		pool->base.dp_clock_source = NULL;
1169 	}
1170 
1171 
1172 	if (pool->base.abm != NULL)
1173 		dce_abm_destroy(&pool->base.abm);
1174 
1175 	if (pool->base.dmcu != NULL)
1176 		dce_dmcu_destroy(&pool->base.dmcu);
1177 
1178 	if (pool->base.dccg != NULL)
1179 		dcn_dccg_destroy(&pool->base.dccg);
1180 
1181 	if (pool->base.pp_smu != NULL)
1182 		dcn20_pp_smu_destroy(&pool->base.pp_smu);
1183 
1184 }
1185 
1186 struct hubp *dcn20_hubp_create(
1187 	struct dc_context *ctx,
1188 	uint32_t inst)
1189 {
1190 	struct dcn20_hubp *hubp2 =
1191 		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
1192 
1193 	if (!hubp2)
1194 		return NULL;
1195 
1196 	if (hubp2_construct(hubp2, ctx, inst,
1197 			&hubp_regs[inst], &hubp_shift, &hubp_mask))
1198 		return &hubp2->base;
1199 
1200 	BREAK_TO_DEBUGGER();
1201 	kfree(hubp2);
1202 	return NULL;
1203 }
1204 
1205 static void get_pixel_clock_parameters(
1206 	struct pipe_ctx *pipe_ctx,
1207 	struct pixel_clk_params *pixel_clk_params)
1208 {
1209 	const struct dc_stream_state *stream = pipe_ctx->stream;
1210 	bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL;
1211 
1212 	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
1213 	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
1214 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
1215 	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
1216 	/* TODO: un-hardcode*/
1217 	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
1218 		LINK_RATE_REF_FREQ_IN_KHZ;
1219 	pixel_clk_params->flags.ENABLE_SS = 0;
1220 	pixel_clk_params->color_depth =
1221 		stream->timing.display_color_depth;
1222 	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
1223 	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
1224 
1225 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
1226 		pixel_clk_params->color_depth = COLOR_DEPTH_888;
1227 
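	/*
	 * When ODM combine is active or the timing packs two pixels per
	 * container (e.g. YCbCr 4:2:0), the hardware handles two pixels per
	 * clock, so only half the nominal pixel clock is requested.
	 */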
1228 	if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine)
1229 		pixel_clk_params->requested_pix_clk_100hz /= 2;
1230 
1231 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1232 		pixel_clk_params->requested_pix_clk_100hz *= 2;
1233 
1234 }
1235 
1236 static void build_clamping_params(struct dc_stream_state *stream)
1237 {
1238 	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
1239 	stream->clamping.c_depth = stream->timing.display_color_depth;
1240 	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
1241 }
1242 
1243 static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
1244 {
1245 
1246 	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
1247 
1248 	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
1249 		pipe_ctx->clock_source,
1250 		&pipe_ctx->stream_res.pix_clk_params,
1251 		&pipe_ctx->pll_settings);
1252 
1253 	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
1254 
1255 	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1256 					&pipe_ctx->stream->bit_depth_params);
1257 	build_clamping_params(pipe_ctx->stream);
1258 
1259 	return DC_OK;
1260 }
1261 
1262 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
1263 {
1264 	enum dc_status status = DC_OK;
1265 	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
1266 
1267 	/*TODO Seems unneeded anymore */
1268 	/*	if (old_context && resource_is_stream_unchanged(old_context, stream)) {
1269 			if (stream != NULL && old_context->streams[i] != NULL) {
1270 				 todo: shouldn't have to copy missing parameter here
1271 				resource_build_bit_depth_reduction_params(stream,
1272 						&stream->bit_depth_params);
1273 				stream->clamping.pixel_encoding =
1274 						stream->timing.pixel_encoding;
1275 
1276 				resource_build_bit_depth_reduction_params(stream,
1277 								&stream->bit_depth_params);
1278 				build_clamping_params(stream);
1279 
1280 				continue;
1281 			}
1282 		}
1283 	*/
1284 
1285 	if (!pipe_ctx)
1286 		return DC_ERROR_UNEXPECTED;
1287 
1288 
1289 	status = build_pipe_hw_param(pipe_ctx);
1290 
1291 	return status;
1292 }
1293 
1294 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1295 
1296 static void acquire_dsc(struct resource_context *res_ctx,
1297 			const struct resource_pool *pool,
1298 			struct display_stream_compressor **dsc)
1299 {
1300 	int i;
1301 
1302 	ASSERT(*dsc == NULL);
1303 	*dsc = NULL;
1304 
1305 	/* Find first free DSC */
1306 	for (i = 0; i < pool->res_cap->num_dsc; i++)
1307 		if (!res_ctx->is_dsc_acquired[i]) {
1308 			*dsc = pool->dscs[i];
1309 			res_ctx->is_dsc_acquired[i] = true;
1310 			break;
1311 		}
1312 }
1313 
1314 static void release_dsc(struct resource_context *res_ctx,
1315 			const struct resource_pool *pool,
1316 			struct display_stream_compressor **dsc)
1317 {
1318 	int i;
1319 
1320 	for (i = 0; i < pool->res_cap->num_dsc; i++)
1321 		if (pool->dscs[i] == *dsc) {
1322 			res_ctx->is_dsc_acquired[i] = false;
1323 			*dsc = NULL;
1324 			break;
1325 		}
1326 }
1327 
1328 #endif
1329 
1330 
1331 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1332 static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
1333 		struct dc_state *dc_ctx,
1334 		struct dc_stream_state *dc_stream)
1335 {
1336 	enum dc_status result = DC_OK;
1337 	int i;
1338 	const struct resource_pool *pool = dc->res_pool;
1339 
1340 	/* Get a DSC if required and available */
1341 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1342 		struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
1343 
1344 		if (pipe_ctx->stream != dc_stream)
1345 			continue;
1346 
1347 		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
1348 
1349 		/* The number of DSCs can be less than the number of pipes */
1350 		if (!pipe_ctx->stream_res.dsc) {
1351 			dm_output_to_console("No DSCs available\n");
1352 			result = DC_NO_DSC_RESOURCE;
1353 		}
1354 
1355 		break;
1356 	}
1357 
1358 	return result;
1359 }
1360 
1361 
1362 static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
1363 		struct dc_state *new_ctx,
1364 		struct dc_stream_state *dc_stream)
1365 {
1366 	struct pipe_ctx *pipe_ctx = NULL;
1367 	int i;
1368 
1369 	for (i = 0; i < MAX_PIPES; i++) {
1370 		if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
1371 			pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
1372 			break;
1373 		}
1374 	}
1375 
1376 	if (!pipe_ctx)
1377 		return DC_ERROR_UNEXPECTED;
1378 
1379 	if (pipe_ctx->stream_res.dsc) {
1380 		struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx);
1381 
1382 		release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
1383 		if (odm_pipe)
1384 			release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
1385 	}
1386 
1387 	return DC_OK;
1388 }
1389 #endif
1390 
1391 
1392 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1393 {
1394 	enum dc_status result = DC_ERROR_UNEXPECTED;
1395 
1396 	result = resource_map_pool_resources(dc, new_ctx, dc_stream);
1397 
1398 	if (result == DC_OK)
1399 		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
1400 
1401 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1402 	/* Get a DSC if required and available */
1403 	if (result == DC_OK && dc_stream->timing.flags.DSC)
1404 		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
1405 #endif
1406 
1407 	if (result == DC_OK)
1408 		result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
1409 
1410 	return result;
1411 }
1412 
1413 
1414 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1415 {
1416 	enum dc_status result = DC_OK;
1417 
1418 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1419 	result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
1420 #endif
1421 
1422 	return result;
1423 }
1424 
1425 
1426 static void swizzle_to_dml_params(
1427 		enum swizzle_mode_values swizzle,
1428 		unsigned int *sw_mode)
1429 {
1430 	switch (swizzle) {
1431 	case DC_SW_LINEAR:
1432 		*sw_mode = dm_sw_linear;
1433 		break;
1434 	case DC_SW_4KB_S:
1435 		*sw_mode = dm_sw_4kb_s;
1436 		break;
1437 	case DC_SW_4KB_S_X:
1438 		*sw_mode = dm_sw_4kb_s_x;
1439 		break;
1440 	case DC_SW_4KB_D:
1441 		*sw_mode = dm_sw_4kb_d;
1442 		break;
1443 	case DC_SW_4KB_D_X:
1444 		*sw_mode = dm_sw_4kb_d_x;
1445 		break;
1446 	case DC_SW_64KB_S:
1447 		*sw_mode = dm_sw_64kb_s;
1448 		break;
1449 	case DC_SW_64KB_S_X:
1450 		*sw_mode = dm_sw_64kb_s_x;
1451 		break;
1452 	case DC_SW_64KB_S_T:
1453 		*sw_mode = dm_sw_64kb_s_t;
1454 		break;
1455 	case DC_SW_64KB_D:
1456 		*sw_mode = dm_sw_64kb_d;
1457 		break;
1458 	case DC_SW_64KB_D_X:
1459 		*sw_mode = dm_sw_64kb_d_x;
1460 		break;
1461 	case DC_SW_64KB_D_T:
1462 		*sw_mode = dm_sw_64kb_d_t;
1463 		break;
1464 	case DC_SW_64KB_R_X:
1465 		*sw_mode = dm_sw_64kb_r_x;
1466 		break;
1467 	case DC_SW_VAR_S:
1468 		*sw_mode = dm_sw_var_s;
1469 		break;
1470 	case DC_SW_VAR_S_X:
1471 		*sw_mode = dm_sw_var_s_x;
1472 		break;
1473 	case DC_SW_VAR_D:
1474 		*sw_mode = dm_sw_var_d;
1475 		break;
1476 	case DC_SW_VAR_D_X:
1477 		*sw_mode = dm_sw_var_d_x;
1478 		break;
1479 
1480 	default:
1481 		ASSERT(0); /* Not supported */
1482 		break;
1483 	}
1484 }
1485 
1486 static bool dcn20_split_stream_for_combine(
1487 		struct resource_context *res_ctx,
1488 		const struct resource_pool *pool,
1489 		struct pipe_ctx *primary_pipe,
1490 		struct pipe_ctx *secondary_pipe,
1491 		bool is_odm_combine)
1492 {
1493 	int pipe_idx = secondary_pipe->pipe_idx;
1494 	struct scaler_data *sd = &primary_pipe->plane_res.scl_data;
1495 	struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;
1496 	int new_width;
1497 
1498 	*secondary_pipe = *primary_pipe;
1499 	secondary_pipe->bottom_pipe = sec_bot_pipe;
1500 
1501 	secondary_pipe->pipe_idx = pipe_idx;
1502 	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
1503 	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
1504 	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
1505 	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
1506 	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
1507 	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
1508 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1509 	secondary_pipe->stream_res.dsc = NULL;
1510 #endif
1511 	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
1512 		ASSERT(!secondary_pipe->bottom_pipe);
1513 		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
1514 		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
1515 	}
1516 	primary_pipe->bottom_pipe = secondary_pipe;
1517 	secondary_pipe->top_pipe = primary_pipe;
1518 
1519 	if (is_odm_combine) {
1520 		if (primary_pipe->plane_state) {
1521 			/* HACTIVE halved for odm combine */
1522 			sd->h_active /= 2;
1523 			/* Copy scl_data to secondary pipe */
1524 			secondary_pipe->plane_res.scl_data = *sd;
1525 
1526 			/* Calculate new vp and recout for left pipe */
1527 			/* Need at least 16 pixels width per side */
1528 			if (sd->recout.x + 16 >= sd->h_active)
1529 				return false;
1530 			new_width = sd->h_active - sd->recout.x;
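			/* Shrink the viewport by the amount the recout shrank,
			 * scaled by the horizontal ratio (source pixels per
			 * destination pixel).
			 */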
1531 			sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1532 					sd->ratios.horz, sd->recout.width - new_width));
1533 			sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1534 					sd->ratios.horz_c, sd->recout.width - new_width));
1535 			sd->recout.width = new_width;
1536 
1537 			/* Calculate new vp and recout for right pipe */
1538 			sd = &secondary_pipe->plane_res.scl_data;
1539 			new_width = sd->recout.width + sd->recout.x - sd->h_active;
1540 			/* Need at least 16 pixels width per side */
1541 			if (new_width <= 16)
1542 				return false;
1543 			sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1544 					sd->ratios.horz, sd->recout.width - new_width));
1545 			sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int(
1546 					sd->ratios.horz_c, sd->recout.width - new_width));
1547 			sd->recout.width = new_width;
1548 			sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int(
1549 					sd->ratios.horz, sd->h_active - sd->recout.x));
1550 			sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
1551 					sd->ratios.horz_c, sd->h_active - sd->recout.x));
1552 			sd->recout.x = 0;
1553 		}
1554 		secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx];
1555 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1556 		if (secondary_pipe->stream->timing.flags.DSC == 1) {
1557 			acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc);
1558 			ASSERT(secondary_pipe->stream_res.dsc);
1559 			if (secondary_pipe->stream_res.dsc == NULL)
1560 				return false;
1561 		}
1562 #endif
1563 	} else {
1564 		ASSERT(primary_pipe->plane_state);
1565 		resource_build_scaling_params(primary_pipe);
1566 		resource_build_scaling_params(secondary_pipe);
1567 	}
1568 
1569 	return true;
1570 }
1571 
1572 void dcn20_populate_dml_writeback_from_context(
1573 		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1574 {
1575 	int pipe_cnt, i;
1576 
	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_writeback_info *wb_info;

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
1582 
1583 		/* Set writeback information */
1584 		pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
1585 		pipes[pipe_cnt].dout.num_active_wb++;
1586 		pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
1587 		pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
1588 		pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
1589 		pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
1590 		pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
1591 		pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
1592 		pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
1593 		pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
1594 		pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
1595 		pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
1596 		if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
1597 			if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
1598 				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
1599 			else
1600 				pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
1601 		} else
1602 			pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
1603 
1604 		pipe_cnt++;
1605 	}
1606 
1607 }
1608 
1609 int dcn20_populate_dml_pipes_from_context(
1610 		struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
1611 {
1612 	int pipe_cnt, i;
1613 	bool synchronized_vblank = true;
1614 
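	/* Vblank can only be treated as synchronized across all planes if every
	 * active stream is timing-synchronizable with the first active stream
	 * found.
	 */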
1615 	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
1616 		if (!res_ctx->pipe_ctx[i].stream)
1617 			continue;
1618 
1619 		if (pipe_cnt < 0) {
1620 			pipe_cnt = i;
1621 			continue;
1622 		}
1623 		if (!resource_are_streams_timing_synchronizable(
1624 				res_ctx->pipe_ctx[pipe_cnt].stream,
1625 				res_ctx->pipe_ctx[i].stream)) {
1626 			synchronized_vblank = false;
1627 			break;
1628 		}
1629 	}
1630 
	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_crtc_timing *timing;
		int output_bpc;

		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		timing = &res_ctx->pipe_ctx[i].stream->timing;

1637 		/* todo:
1638 		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
1639 		pipes[pipe_cnt].pipe.src.dcc = 0;
1640 		pipes[pipe_cnt].pipe.src.vm = 0;*/
1641 
1642 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1643 		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
1644 		/* todo: rotation?*/
1645 		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
1646 #endif
1647 		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
1648 			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
1649 			/* 1/2 vblank */
1650 			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
1651 				(timing->v_total - timing->v_addressable
1652 					- timing->v_border_top - timing->v_border_bottom) / 2;
1653 			/* 36 bytes dp, 32 hdmi */
1654 			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
1655 				dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
1656 		}
1657 		pipes[pipe_cnt].pipe.src.dcc = false;
1658 		pipes[pipe_cnt].pipe.src.dcc_rate = 1;
1659 		pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
1660 		pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch;
1661 		pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start
1662 				- timing->h_addressable
1663 				- timing->h_border_left
1664 				- timing->h_border_right;
1665 		pipes[pipe_cnt].pipe.dest.vblank_start = timing->v_total - timing->v_front_porch;
1666 		pipes[pipe_cnt].pipe.dest.vblank_end = pipes[pipe_cnt].pipe.dest.vblank_start
1667 				- timing->v_addressable
1668 				- timing->v_border_top
1669 				- timing->v_border_bottom;
1670 		pipes[pipe_cnt].pipe.dest.htotal = timing->h_total;
1671 		pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
1672 		pipes[pipe_cnt].pipe.dest.hactive = timing->h_addressable;
1673 		pipes[pipe_cnt].pipe.dest.vactive = timing->v_addressable;
1674 		pipes[pipe_cnt].pipe.dest.interlaced = timing->flags.INTERLACE;
1675 		pipes[pipe_cnt].pipe.dest.pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
1676 		if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
1677 			pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
1678 		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
1679 		pipes[pipe_cnt].dout.dp_lanes = 4;
1680 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
1681 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
1682 
1683 		switch (res_ctx->pipe_ctx[i].stream->signal) {
1684 		case SIGNAL_TYPE_DISPLAY_PORT_MST:
1685 		case SIGNAL_TYPE_DISPLAY_PORT:
1686 			pipes[pipe_cnt].dout.output_type = dm_dp;
1687 			break;
1688 		case SIGNAL_TYPE_EDP:
1689 			pipes[pipe_cnt].dout.output_type = dm_edp;
1690 			break;
1691 		case SIGNAL_TYPE_HDMI_TYPE_A:
1692 		case SIGNAL_TYPE_DVI_SINGLE_LINK:
1693 		case SIGNAL_TYPE_DVI_DUAL_LINK:
1694 			pipes[pipe_cnt].dout.output_type = dm_hdmi;
1695 			break;
1696 		default:
			/* If there is no signal, assume DP with 4 lanes to allow the max config */
1698 			pipes[pipe_cnt].dout.output_type = dm_dp;
1699 			pipes[pipe_cnt].dout.dp_lanes = 4;
1700 		}
1701 
1702 		switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
1703 		case COLOR_DEPTH_666:
1704 			output_bpc = 6;
1705 			break;
1706 		case COLOR_DEPTH_888:
1707 			output_bpc = 8;
1708 			break;
1709 		case COLOR_DEPTH_101010:
1710 			output_bpc = 10;
1711 			break;
1712 		case COLOR_DEPTH_121212:
1713 			output_bpc = 12;
1714 			break;
1715 		case COLOR_DEPTH_141414:
1716 			output_bpc = 14;
1717 			break;
1718 		case COLOR_DEPTH_161616:
1719 			output_bpc = 16;
1720 			break;
1721 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
1722 		case COLOR_DEPTH_999:
1723 			output_bpc = 9;
1724 			break;
1725 		case COLOR_DEPTH_111111:
1726 			output_bpc = 11;
1727 			break;
1728 #endif
1729 		default:
1730 			output_bpc = 8;
1731 			break;
1732 		}
1733 
1734 
1735 		switch (res_ctx->pipe_ctx[i].stream->timing.pixel_encoding) {
1736 		case PIXEL_ENCODING_RGB:
1737 		case PIXEL_ENCODING_YCBCR444:
1738 			pipes[pipe_cnt].dout.output_format = dm_444;
1739 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
1740 			break;
1741 		case PIXEL_ENCODING_YCBCR420:
1742 			pipes[pipe_cnt].dout.output_format = dm_420;
1743 			pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2;
1744 			break;
1745 		case PIXEL_ENCODING_YCBCR422:
1746 			if (true) /* todo */
1747 				pipes[pipe_cnt].dout.output_format = dm_s422;
1748 			else
1749 				pipes[pipe_cnt].dout.output_format = dm_n422;
1750 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
1751 			break;
1752 		default:
1753 			pipes[pipe_cnt].dout.output_format = dm_444;
1754 			pipes[pipe_cnt].dout.output_bpp = output_bpc * 3;
1755 		}
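		/* Pipes that share a plane with their top pipe (split pipes) are
		 * grouped under the top pipe's index so DML treats them as halves
		 * of one surface.
		 */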
1756 		pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
1757 		if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
1758 				== res_ctx->pipe_ctx[i].plane_state)
1759 			pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
1760 
1761 		/* todo: default max for now, until there is logic reflecting this in dc*/
1762 		pipes[pipe_cnt].dout.output_bpc = 12;
1763 		/*
1764 		 * Use max cursor settings for calculations to minimize
1765 		 * bw calculations due to cursor on/off
1766 		 */
1767 		pipes[pipe_cnt].pipe.src.num_cursors = 2;
1768 		pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
1769 		pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
1770 		pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
1771 		pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
1772 
1773 		if (!res_ctx->pipe_ctx[i].plane_state) {
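			/* No plane attached: populate DML with conservative defaults
			 * for a blank pipe, capped at a 1080p linear viewport with
			 * no scaling.
			 */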
1774 			pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
1775 			pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
1776 			pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
1777 			pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable;
1778 			if (pipes[pipe_cnt].pipe.src.viewport_width > 1920)
1779 				pipes[pipe_cnt].pipe.src.viewport_width = 1920;
1780 			pipes[pipe_cnt].pipe.src.viewport_height = timing->v_addressable;
1781 			if (pipes[pipe_cnt].pipe.src.viewport_height > 1080)
1782 				pipes[pipe_cnt].pipe.src.viewport_height = 1080;
1783 			pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */
1784 			pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
1785 			pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/
1786 			pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/
1787 			pipes[pipe_cnt].pipe.dest.full_recout_width = pipes[pipe_cnt].pipe.dest.recout_width;  /*when is_hsplit != 1*/
1788 			pipes[pipe_cnt].pipe.dest.full_recout_height = pipes[pipe_cnt].pipe.dest.recout_height; /*when is_hsplit != 1*/
1789 			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
1790 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = 1.0;
1791 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = 1.0;
1792 			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
1793 			pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
1794 			pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
1795 			pipes[pipe_cnt].pipe.src.is_hsplit = 0;
1796 			pipes[pipe_cnt].pipe.dest.odm_combine = 0;
1797 			pipes[pipe_cnt].pipe.dest.vtotal_min = timing->v_total;
1798 			pipes[pipe_cnt].pipe.dest.vtotal_max = timing->v_total;
1799 		} else {
1800 			struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
1801 			struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
1802 
1803 			pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
1804 			pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
1805 					&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
1806 					|| (res_ctx->pipe_ctx[i].top_pipe
1807 					&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
1808 			pipes[pipe_cnt].pipe.dest.odm_combine = (res_ctx->pipe_ctx[i].bottom_pipe
1809 					&& res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln
1810 					&& res_ctx->pipe_ctx[i].bottom_pipe->stream_res.opp
1811 						!= res_ctx->pipe_ctx[i].stream_res.opp)
1812 				|| (res_ctx->pipe_ctx[i].top_pipe
1813 					&& res_ctx->pipe_ctx[i].top_pipe->plane_state == pln
1814 					&& res_ctx->pipe_ctx[i].top_pipe->stream_res.opp
1815 						!= res_ctx->pipe_ctx[i].stream_res.opp);
1816 			pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
1817 					|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
1818 			pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
1819 			pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
1820 			pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
1821 			pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
1822 			pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
1823 			pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
1824 			if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
1825 				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch;
1826 				pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch;
1827 				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l;
1828 				pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c;
1829 			} else {
1830 				pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch;
1831 				pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch;
1832 			}
1833 			pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
1834 			pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
1835 			pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
1836 			pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
1837 			pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
1838 			if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
1839 				pipes[pipe_cnt].pipe.dest.full_recout_width +=
1840 						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
1841 				pipes[pipe_cnt].pipe.dest.full_recout_height +=
1842 						res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
1843 			} else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
1844 				pipes[pipe_cnt].pipe.dest.full_recout_width +=
1845 						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
1846 				pipes[pipe_cnt].pipe.dest.full_recout_height +=
1847 						res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
1848 			}
1849 
1850 			pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
1851 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
1852 			pipes[pipe_cnt].pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
1853 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
1854 			pipes[pipe_cnt].pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
1855 			pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable =
1856 					scl->ratios.vert.value != dc_fixpt_one.value
1857 					|| scl->ratios.horz.value != dc_fixpt_one.value
1858 					|| scl->ratios.vert_c.value != dc_fixpt_one.value
1859 					|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
1860 					|| dc->debug.always_scale; /*support always scale*/
1861 			pipes[pipe_cnt].pipe.scale_taps.htaps = scl->taps.h_taps;
1862 			pipes[pipe_cnt].pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
1863 			pipes[pipe_cnt].pipe.scale_taps.vtaps = scl->taps.v_taps;
1864 			pipes[pipe_cnt].pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
1865 
1866 			pipes[pipe_cnt].pipe.src.macro_tile_size =
1867 					swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
1868 			swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
1869 					&pipes[pipe_cnt].pipe.src.sw_mode);
1870 
1871 			switch (pln->format) {
1872 			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1873 			case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1874 				pipes[pipe_cnt].pipe.src.source_format = dm_420_8;
1875 				break;
1876 			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1877 			case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1878 				pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
1879 				break;
1880 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1881 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1882 			case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1883 				pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
1884 				break;
1885 			case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1886 			case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1887 				pipes[pipe_cnt].pipe.src.source_format = dm_444_16;
1888 				break;
1889 			case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
1890 				pipes[pipe_cnt].pipe.src.source_format = dm_444_8;
1891 				break;
1892 			default:
1893 				pipes[pipe_cnt].pipe.src.source_format = dm_444_32;
1894 				break;
1895 			}
1896 		}
1897 
1898 		pipe_cnt++;
1899 	}
1900 
1901 	/* populate writeback information */
1902 	dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
1903 
1904 	return pipe_cnt;
1905 }
1906 
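/*
 * Derive the maximum scaled time for MCIF_WB arbitration: convert the free
 * luma/chroma entry counts into a buffer capability (in the same 4-bit
 * fractional time units as the watermarks) based on the writeback pixel
 * mode, then subtract the urgent watermark.
 */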
1907 unsigned int dcn20_calc_max_scaled_time(
1908 		unsigned int time_per_pixel,
1909 		enum mmhubbub_wbif_mode mode,
1910 		unsigned int urgent_watermark)
1911 {
1912 	unsigned int time_per_byte = 0;
1913 	unsigned int total_y_free_entry = 0x200; /* two memory pieces for luma */
1914 	unsigned int total_c_free_entry = 0x140; /* two memory pieces for chroma */
1915 	unsigned int small_free_entry, max_free_entry;
1916 	unsigned int buf_lh_capability;
1917 	unsigned int max_scaled_time;
1918 
1919 	if (mode == PACKED_444) /* packed mode */
1920 		time_per_byte = time_per_pixel/4;
1921 	else if (mode == PLANAR_420_8BPC)
1922 		time_per_byte  = time_per_pixel;
1923 	else if (mode == PLANAR_420_10BPC) /* p010 */
1924 		time_per_byte  = time_per_pixel * 819/1024;
1925 
1926 	if (time_per_byte == 0)
1927 		time_per_byte = 1;
1928 
1929 	small_free_entry  = (total_y_free_entry > total_c_free_entry) ? total_c_free_entry : total_y_free_entry;
1930 	max_free_entry    = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
1931 	buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is a 4-bit fraction */
1932 	max_scaled_time   = buf_lh_capability - urgent_watermark;
1933 	return max_scaled_time;
1934 }
1935 
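/*
 * Fill in the MCIF_WB arbitration parameters for every enabled writeback
 * pipe in the context: the watermarks come from the DML writeback
 * calculations, and max_scaled_time is derived from the stream pixel clock
 * and the DWB output format via dcn20_calc_max_scaled_time().
 */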
1936 void dcn20_set_mcif_arb_params(
1937 		struct dc *dc,
1938 		struct dc_state *context,
1939 		display_e2e_pipe_params_st *pipes,
1940 		int pipe_cnt)
1941 {
1942 	enum mmhubbub_wbif_mode wbif_mode;
1943 	struct mcif_arb_params *wb_arb_params;
1944 	int i, j, k, dwb_pipe;
1945 
1946 	/* Writeback MCIF_WB arbitration parameters */
1947 	dwb_pipe = 0;
1948 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1949 
1950 		if (!context->res_ctx.pipe_ctx[i].stream)
1951 			continue;
1952 
1953 		for (j = 0; j < MAX_DWB_PIPES; j++) {
1954 			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
1955 				continue;
1956 
1957 			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
1958 			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];
1959 
1960 			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
1961 				if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
1962 					wbif_mode = PLANAR_420_8BPC;
1963 				else
1964 					wbif_mode = PLANAR_420_10BPC;
1965 			} else
1966 				wbif_mode = PACKED_444;
1967 
1968 			for (k = 0; k < ARRAY_SIZE(wb_arb_params->cli_watermark); k++) {
1969 				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1970 				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1971 			}
1972 			wb_arb_params->time_per_pixel = 16.0 / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* 4-bit fraction, in ms */
1973 			wb_arb_params->slice_lines = 32;
1974 			wb_arb_params->arbitration_slice = 2;
1975 			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
1976 				wbif_mode,
1977 				wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
1978 
1979 			dwb_pipe++;
1980 
1981 			if (dwb_pipe >= MAX_DWB_PIPES)
1982 				return;
1983 		}
1984 		if (dwb_pipe >= MAX_DWB_PIPES)
1985 			return;
1986 	}
1987 }
1988 
1989 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1990 static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
1991 {
1992 	int i;
1993 
1994 	/* Validate DSC config, dsc count validation is already done */
1995 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1996 		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
1997 		struct dc_stream_state *stream = pipe_ctx->stream;
1998 		struct dsc_config dsc_cfg;
1999 
2000 		/* Only need to validate top pipe */
2001 		if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC)
2002 			continue;
2003 
2004 		dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left
2005 				+ stream->timing.h_border_right;
2006 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
2007 				+ stream->timing.v_border_bottom;
2008 		if (dc_res_get_odm_bottom_pipe(pipe_ctx))
2009 			dsc_cfg.pic_width /= 2;
2010 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
2011 		dsc_cfg.color_depth = stream->timing.display_color_depth;
2012 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
2013 
2014 		if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
2015 			return false;
2016 	}
2017 	return true;
2018 }
2019 #endif
2020 
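/*
 * Validate bandwidth for a new dc_state.  The flow is roughly:
 *   1. merge any previously split pipes so mode support can re-decide,
 *   2. populate the DML pipe array and query the required voltage level,
 *   3. re-split pipes / enable ODM combine where DML or debug options ask for it,
 *   4. (unless fast_validate) compute watermark sets A-D and the clock values,
 *      then fill in the per-pipe DLG/TTU/RQ register parameters.
 * Returns true if the mode fits, false otherwise.
 */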
2021 bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
2022 		bool fast_validate)
2023 {
2024 	bool out = false;
2025 
2026 	BW_VAL_TRACE_SETUP();
2027 
2028 	int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit;
2029 	int pipe_split_from[MAX_PIPES];
2030 	bool odm_capable = context->bw_ctx.dml.ip.odm_capable;
2031 	bool force_split = false;
2032 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2033 	bool failed_non_odm_dsc = false;
2034 #endif
2035 	int split_threshold = dc->res_pool->pipe_count / 2;
2036 	bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
2037 	display_e2e_pipe_params_st *pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
2038 	DC_LOGGER_INIT(dc->ctx->logger);
2039 
2040 	BW_VAL_TRACE_COUNT();
2041 
2042 	ASSERT(pipes);
2043 	if (!pipes)
2044 		return false;
2045 
2046 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2047 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2048 		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
2049 
2050 		if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
2051 			continue;
2052 
2053 		/* merge previously split pipe since mode support needs to make the decision */
2054 		pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
2055 		if (hsplit_pipe->bottom_pipe)
2056 			hsplit_pipe->bottom_pipe->top_pipe = pipe;
2057 		hsplit_pipe->plane_state = NULL;
2058 		hsplit_pipe->stream = NULL;
2059 		hsplit_pipe->top_pipe = NULL;
2060 		hsplit_pipe->bottom_pipe = NULL;
2061 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2062 		if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc)
2063 			release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc);
2064 #endif
2065 		/* Clear plane_res and stream_res */
2066 		memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
2067 		memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
2068 		if (pipe->plane_state)
2069 			resource_build_scaling_params(pipe);
2070 	}
2071 
2072 	if (dc->res_pool->funcs->populate_dml_pipes)
2073 		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
2074 			&context->res_ctx, pipes);
2075 	else
2076 		pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
2077 			&context->res_ctx, pipes);
2078 
2079 	if (!pipe_cnt) {
2080 		BW_VAL_TRACE_SKIP(pass);
2081 		out = true;
2082 		goto validate_out;
2083 	}
2084 
2085 	context->bw_ctx.dml.ip.odm_capable = 0;
2086 
2087 	vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2088 
2089 	context->bw_ctx.dml.ip.odm_capable = odm_capable;
2090 
2091 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2092 	/* 1 dsc per stream dsc validation */
2093 	if (vlevel <= context->bw_ctx.dml.soc.num_states)
2094 		if (!dcn20_validate_dsc(dc, context)) {
2095 			failed_non_odm_dsc = true;
2096 			vlevel = context->bw_ctx.dml.soc.num_states + 1;
2097 		}
2098 #endif
2099 
2100 	if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable)
2101 		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2102 
2103 	if (vlevel > context->bw_ctx.dml.soc.num_states)
2104 		goto validate_fail;
2105 
2106 	if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold)
2107 		|| (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold))
2108 		context->commit_hints.full_update_needed = true;
2109 
2110 	/* initialize pipe_split_from to invalid idx */
2111 	for (i = 0; i < MAX_PIPES; i++)
2112 		pipe_split_from[i] = -1;
2113 
2114 	/* Single-display-only conditionals get set here */
2115 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2116 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2117 		bool exit_loop = false;
2118 
2119 		if (!pipe->stream || pipe->top_pipe)
2120 			continue;
2121 
2122 		if (dc->debug.force_single_disp_pipe_split) {
2123 			if (!force_split)
2124 				force_split = true;
2125 			else {
2126 				force_split = false;
2127 				exit_loop = true;
2128 			}
2129 		}
2130 		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
2131 			if (avoid_split)
2132 				avoid_split = false;
2133 			else {
2134 				avoid_split = true;
2135 				exit_loop = true;
2136 			}
2137 		}
2138 		if (exit_loop)
2139 			break;
2140 	}
2141 
2142 	if (context->stream_count > split_threshold)
2143 		avoid_split = true;
2144 
2145 	vlevel_unsplit = vlevel;
2146 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2147 		if (!context->res_ctx.pipe_ctx[i].stream)
2148 			continue;
2149 		for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++)
2150 			if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1)
2151 				break;
2152 		pipe_idx++;
2153 	}
2154 
2155 	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
2156 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2157 		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
2158 		bool need_split = true;
2159 		bool need_split3d;
2160 
2161 		if (!pipe->stream || pipe_split_from[i] >= 0)
2162 			continue;
2163 
2164 		pipe_idx++;
2165 
2166 		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
2167 			force_split = true;
2168 			context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true;
2169 			context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true;
2170 		}
2171 		if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
2172 			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
2173 		if (dc->config.forced_clocks == true) {
2174 			context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] =
2175 					context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2176 		}
2177 		if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
2178 			hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
2179 			ASSERT(hsplit_pipe);
2180 			if (!dcn20_split_stream_for_combine(
2181 					&context->res_ctx, dc->res_pool,
2182 					pipe, hsplit_pipe,
2183 					true))
2184 				goto validate_fail;
2185 			pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
2186 			dcn20_build_mapped_resource(dc, context, pipe->stream);
2187 		}
2188 
2189 		if (!pipe->plane_state)
2190 			continue;
2191 		/* Skip 2nd half of already split pipe */
2192 		if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
2193 			continue;
2194 
2195 		need_split3d = ((pipe->stream->view_format ==
2196 				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
2197 				pipe->stream->view_format ==
2198 				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
2199 				(pipe->stream->timing.timing_3d_format ==
2200 				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
2201 				 pipe->stream->timing.timing_3d_format ==
2202 				TIMING_3D_FORMAT_SIDE_BY_SIDE));
2203 
2204 		if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) {
2205 			need_split = false;
2206 			vlevel = vlevel_unsplit;
2207 			context->bw_ctx.dml.vba.maxMpcComb = 0;
2208 		} else
2209 			need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2;
2210 
2211 		/* We do not support mpo + odm at the moment */
2212 		if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
2213 				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
2214 			goto validate_fail;
2215 
2216 		if (need_split3d || need_split || force_split) {
2217 			if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
2218 				/* pipe not split previously needs split */
2219 				hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
2220 				ASSERT(hsplit_pipe || force_split);
2221 				if (!hsplit_pipe)
2222 					continue;
2223 
2224 				if (!dcn20_split_stream_for_combine(
2225 						&context->res_ctx, dc->res_pool,
2226 						pipe, hsplit_pipe,
2227 						context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]))
2228 					goto validate_fail;
2229 				pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
2230 			}
2231 		} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
2232 			/* merge should already have been done */
2233 			ASSERT(0);
2234 		}
2235 	}
2236 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2237 	/* Actual dsc count per stream dsc validation */
2238 	if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) {
2239 		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
2240 				DML_FAIL_DSC_VALIDATION_FAILURE;
2241 		goto validate_fail;
2242 	}
2243 #endif
2244 
2245 	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
2246 
2247 	if (fast_validate) {
2248 		BW_VAL_TRACE_SKIP(fast);
2249 		out = true;
2250 		goto validate_out;
2251 	}
2252 
2253 	for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
2254 		if (!context->res_ctx.pipe_ctx[i].stream)
2255 			continue;
2256 
2257 		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
2258 		pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2259 
2260 		if (pipe_split_from[i] < 0) {
2261 			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2262 					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx];
2263 			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx)
2264 				pipes[pipe_cnt].pipe.dest.odm_combine =
2265 						context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
2266 			else
2267 				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2268 			pipe_idx++;
2269 		} else {
2270 			pipes[pipe_cnt].clks_cfg.dppclk_mhz =
2271 					context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]];
2272 			if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i])
2273 				pipes[pipe_cnt].pipe.dest.odm_combine =
2274 						context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]];
2275 			else
2276 				pipes[pipe_cnt].pipe.dest.odm_combine = 0;
2277 		}
2278 		if (dc->config.forced_clocks) {
2279 			pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
2280 			pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2281 		}
2282 		pipe_cnt++;
2283 	}
2284 
2285 	if (pipe_cnt != pipe_idx) {
2286 		if (dc->res_pool->funcs->populate_dml_pipes)
2287 			pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc,
2288 				&context->res_ctx, pipes);
2289 		else
2290 			pipe_cnt = dcn20_populate_dml_pipes_from_context(dc,
2291 				&context->res_ctx, pipes);
2292 	}
2293 
2294 	pipes[0].clks_cfg.voltage = vlevel;
2295 	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2296 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2297 
2298 	/* only pipe 0 is read for voltage and dcf/soc clocks */
2299 	if (vlevel < 1) {
2300 		pipes[0].clks_cfg.voltage = 1;
2301 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz;
2302 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz;
2303 	}
2304 	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2305 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2306 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2307 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2308 	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2309 
2310 	if (vlevel < 2) {
2311 		pipes[0].clks_cfg.voltage = 2;
2312 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
2313 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz;
2314 	}
2315 	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2316 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2317 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2318 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2319 	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2320 
2321 	if (vlevel < 3) {
2322 		pipes[0].clks_cfg.voltage = 3;
2323 		pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].dcfclk_mhz;
2324 		pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[3].socclk_mhz;
2325 	}
2326 	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2327 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2328 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2329 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2330 	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2331 
2332 	pipes[0].clks_cfg.voltage = vlevel;
2333 	pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
2334 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2335 	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2336 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2337 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2338 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2339 	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2340 	/* Writeback MCIF_WB arbitration parameters */
2341 	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
2342 
2343 	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
2344 	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
2345 	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
2346 	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
2347 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
2348 	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
2349 	context->bw_ctx.bw.dcn.clk.p_state_change_support =
2350 		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
2351 							!= dm_dram_clock_change_unsupported;
2352 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
2353 
2354 	BW_VAL_TRACE_END_WATERMARKS();
2355 
2356 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2357 		if (!context->res_ctx.pipe_ctx[i].stream)
2358 			continue;
2359 		pipes[pipe_idx].pipe.dest.vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx];
2360 		pipes[pipe_idx].pipe.dest.vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx];
2361 		pipes[pipe_idx].pipe.dest.vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx];
2362 		pipes[pipe_idx].pipe.dest.vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx];
2363 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
2364 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
2365 		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
2366 						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
2367 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
2368 		context->res_ctx.pipe_ctx[i].stream_res.dscclk_khz =
2369 				context->bw_ctx.dml.vba.DSCCLK_calculated[pipe_idx] * 1000;
2370 #endif
2371 		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
2372 		pipe_idx++;
2373 	}
2374 
2375 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2376 		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
2377 
2378 		if (!context->res_ctx.pipe_ctx[i].stream)
2379 			continue;
2380 
2381 		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
2382 				&context->res_ctx.pipe_ctx[i].dlg_regs,
2383 				&context->res_ctx.pipe_ctx[i].ttu_regs,
2384 				pipes,
2385 				pipe_cnt,
2386 				pipe_idx,
2387 				cstate_en,
2388 				context->bw_ctx.bw.dcn.clk.p_state_change_support,
2389 				false, false, false);
2390 
2391 		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
2392 				&context->res_ctx.pipe_ctx[i].rq_regs,
2393 				pipes[pipe_idx].pipe);
2394 		pipe_idx++;
2395 	}
2396 
2397 	out = true;
2398 	goto validate_out;
2399 
2400 validate_fail:
2401 	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
2402 		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
2403 
2404 	BW_VAL_TRACE_SKIP(fail);
2405 	out = false;
2406 
2407 validate_out:
2408 	kfree(pipes);
2409 
2410 	BW_VAL_TRACE_FINISH();
2411 
2412 	return out;
2413 }
2414 
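/*
 * Acquire an idle secondary pipe for an additional plane on @stream.  The
 * new pipe shares the head pipe's stream, timing generator and OPP, and
 * gets its own HUBP/IPP/DPP instance from the pool.
 */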
2415 struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
2416 		struct dc_state *state,
2417 		const struct resource_pool *pool,
2418 		struct dc_stream_state *stream)
2419 {
2420 	struct resource_context *res_ctx = &state->res_ctx;
2421 	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
2422 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
2423 
2424 	if (!head_pipe)
2425 		ASSERT(0);
2426 
2427 	if (!idle_pipe)
2428 		return NULL;
2429 
2430 	idle_pipe->stream = head_pipe->stream;
2431 	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
2432 	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
2433 
2434 	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
2435 	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
2436 	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
2437 	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
2438 
2439 	return idle_pipe;
2440 }
2441 
2442 bool dcn20_get_dcc_compression_cap(const struct dc *dc,
2443 		const struct dc_dcc_surface_param *input,
2444 		struct dc_surface_dcc_cap *output)
2445 {
2446 	return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
2447 			dc->res_pool->hubbub,
2448 			input,
2449 			output);
2450 }
2451 
2452 static void dcn20_destroy_resource_pool(struct resource_pool **pool)
2453 {
2454 	struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
2455 
2456 	destruct(dcn20_pool);
2457 	kfree(dcn20_pool);
2458 	*pool = NULL;
2459 }
2460 
2461 
2462 static struct dc_cap_funcs cap_funcs = {
2463 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
2464 };
2465 
2466 
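/*
 * Pick a default swizzle mode for a plane based on its pixel format:
 * DC_SW_64KB_D for 64bpp surfaces, DC_SW_64KB_S otherwise.
 */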
2467 enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
2468 {
2469 	enum dc_status result = DC_OK;
2470 
2471 	enum surface_pixel_format surf_pix_format = plane_state->format;
2472 	unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
2473 
2474 	enum swizzle_mode_values swizzle = DC_SW_LINEAR;
2475 
2476 	if (bpp == 64)
2477 		swizzle = DC_SW_64KB_D;
2478 	else
2479 		swizzle = DC_SW_64KB_S;
2480 
2481 	plane_state->tiling_info.gfx9.swizzle = swizzle;
2482 	return result;
2483 }
2484 
2485 static struct resource_funcs dcn20_res_pool_funcs = {
2486 	.destroy = dcn20_destroy_resource_pool,
2487 	.link_enc_create = dcn20_link_encoder_create,
2488 	.validate_bandwidth = dcn20_validate_bandwidth,
2489 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
2490 	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
2491 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
2492 	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
2493 	.get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
2494 	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
2495 	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
2496 };
2497 
2498 bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
2499 {
2500 	int i;
2501 	uint32_t pipe_count = pool->res_cap->num_dwb;
2502 
2503 	ASSERT(pipe_count > 0);
2504 
2505 	for (i = 0; i < pipe_count; i++) {
2506 		struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc),
2507 						    GFP_KERNEL);
2508 
2509 		if (!dwbc20) {
2510 			dm_error("DC: failed to create dwbc20!\n");
2511 			return false;
2512 		}
2513 		dcn20_dwbc_construct(dwbc20, ctx,
2514 				&dwbc20_regs[i],
2515 				&dwbc20_shift,
2516 				&dwbc20_mask,
2517 				i);
2518 		pool->dwbc[i] = &dwbc20->base;
2519 	}
2520 	return true;
2521 }
2522 
2523 bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
2524 {
2525 	int i;
2526 	uint32_t pipe_count = pool->res_cap->num_dwb;
2527 
2528 	ASSERT(pipe_count > 0);
2529 
2530 	for (i = 0; i < pipe_count; i++) {
2531 		struct dcn20_mmhubbub *mcif_wb20 = kzalloc(sizeof(struct dcn20_mmhubbub),
2532 						    GFP_KERNEL);
2533 
2534 		if (!mcif_wb20) {
2535 			dm_error("DC: failed to create mcif_wb20!\n");
2536 			return false;
2537 		}
2538 
2539 		dcn20_mmhubbub_construct(mcif_wb20, ctx,
2540 				&mcif_wb20_regs[i],
2541 				&mcif_wb20_shift,
2542 				&mcif_wb20_mask,
2543 				i);
2544 
2545 		pool->mcif_wb[i] = &mcif_wb20->base;
2546 	}
2547 	return true;
2548 }
2549 
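/*
 * Allocate and populate the pp_smu function table from the DM layer.  If the
 * interface version reported back is not PP_SMU_VER_NV the table is zeroed,
 * so callers see no usable SMU hooks rather than mismatched ones.
 */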
2550 struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
2551 {
2552 	struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
2553 
2554 	if (!pp_smu)
2555 		return pp_smu;
2556 
2557 	dm_pp_get_funcs(ctx, pp_smu);
2558 
2559 	if (pp_smu->ctx.ver != PP_SMU_VER_NV)
2560 		pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
2561 
2562 	return pp_smu;
2563 }
2564 
2565 void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
2566 {
2567 	if (pp_smu && *pp_smu) {
2568 		kfree(*pp_smu);
2569 		*pp_smu = NULL;
2570 	}
2571 }
2572 
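/*
 * Clamp every state in the soc bounding box to the maximum sustainable
 * clocks reported by the SMU, then drop states that became duplicates of
 * their neighbour as a result.
 */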
2573 static void cap_soc_clocks(
2574 		struct _vcs_dpi_soc_bounding_box_st *bb,
2575 		struct pp_smu_nv_clock_table max_clocks)
2576 {
2577 	int i;
2578 
2579 	// First pass - cap all clocks higher than the reported max
2580 	for (i = 0; i < bb->num_states; i++) {
2581 		if ((bb->clock_limits[i].dcfclk_mhz > (max_clocks.dcfClockInKhz / 1000))
2582 				&& max_clocks.dcfClockInKhz != 0)
2583 			bb->clock_limits[i].dcfclk_mhz = (max_clocks.dcfClockInKhz / 1000);
2584 
2585 		if ((bb->clock_limits[i].dram_speed_mts > (max_clocks.uClockInKhz / 1000) * 16)
2586 						&& max_clocks.uClockInKhz != 0)
2587 			bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
2588 
2589 		// HACK: Force every uclk to max for now to "disable" uclk switching.
2590 		bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16;
2591 
2592 		if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000))
2593 						&& max_clocks.fabricClockInKhz != 0)
2594 			bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000);
2595 
2596 		if ((bb->clock_limits[i].dispclk_mhz > (max_clocks.displayClockInKhz / 1000))
2597 						&& max_clocks.displayClockInKhz != 0)
2598 			bb->clock_limits[i].dispclk_mhz = (max_clocks.displayClockInKhz / 1000);
2599 
2600 		if ((bb->clock_limits[i].dppclk_mhz > (max_clocks.dppClockInKhz / 1000))
2601 						&& max_clocks.dppClockInKhz != 0)
2602 			bb->clock_limits[i].dppclk_mhz = (max_clocks.dppClockInKhz / 1000);
2603 
2604 		if ((bb->clock_limits[i].phyclk_mhz > (max_clocks.phyClockInKhz / 1000))
2605 						&& max_clocks.phyClockInKhz != 0)
2606 			bb->clock_limits[i].phyclk_mhz = (max_clocks.phyClockInKhz / 1000);
2607 
2608 		if ((bb->clock_limits[i].socclk_mhz > (max_clocks.socClockInKhz / 1000))
2609 						&& max_clocks.socClockInKhz != 0)
2610 			bb->clock_limits[i].socclk_mhz = (max_clocks.socClockInKhz / 1000);
2611 
2612 		if ((bb->clock_limits[i].dscclk_mhz > (max_clocks.dscClockInKhz / 1000))
2613 						&& max_clocks.dscClockInKhz != 0)
2614 			bb->clock_limits[i].dscclk_mhz = (max_clocks.dscClockInKhz / 1000);
2615 	}
2616 
2617 	// Second pass - remove all duplicate clock states
2618 	for (i = bb->num_states - 1; i > 1; i--) {
2619 		bool duplicate = true;
2620 
2621 		if (bb->clock_limits[i-1].dcfclk_mhz != bb->clock_limits[i].dcfclk_mhz)
2622 			duplicate = false;
2623 		if (bb->clock_limits[i-1].dispclk_mhz != bb->clock_limits[i].dispclk_mhz)
2624 			duplicate = false;
2625 		if (bb->clock_limits[i-1].dppclk_mhz != bb->clock_limits[i].dppclk_mhz)
2626 			duplicate = false;
2627 		if (bb->clock_limits[i-1].dram_speed_mts != bb->clock_limits[i].dram_speed_mts)
2628 			duplicate = false;
2629 		if (bb->clock_limits[i-1].dscclk_mhz != bb->clock_limits[i].dscclk_mhz)
2630 			duplicate = false;
2631 		if (bb->clock_limits[i-1].fabricclk_mhz != bb->clock_limits[i].fabricclk_mhz)
2632 			duplicate = false;
2633 		if (bb->clock_limits[i-1].phyclk_mhz != bb->clock_limits[i].phyclk_mhz)
2634 			duplicate = false;
2635 		if (bb->clock_limits[i-1].socclk_mhz != bb->clock_limits[i].socclk_mhz)
2636 			duplicate = false;
2637 
2638 		if (duplicate)
2639 			bb->num_states--;
2640 	}
2641 }
2642 
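/*
 * Rebuild the soc bounding box clock states from the UCLK DPM states
 * reported by the SMU: fabric, SOC and DCF clocks are derived from each
 * uclk state (with the min_dcfclk override used as a floor on fclk), while
 * display-related clocks are pinned to their maximum sustainable values.
 */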
2643 static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
2644 		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
2645 {
2646 	struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0};
2647 	int i;
2648 	int num_calculated_states = 0;
2649 	int min_dcfclk = 0;
2650 
2651 	if (num_states == 0)
2652 		return;
2653 
2654 	if (dc->bb_overrides.min_dcfclk_mhz > 0)
2655 		min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
2656 
2657 	for (i = 0; i < num_states; i++) {
2658 		int min_fclk_required_by_uclk;
2659 		calculated_states[i].state = i;
2660 		calculated_states[i].dram_speed_mts = uclk_states[i] * 16 / 1000;
2661 
2662 		// FCLK:UCLK ratio is 1.08; uclk_states[] is in kHz, so this also scales to MHz
2663 		min_fclk_required_by_uclk = mul_u64_u32_shr(BIT_ULL(32) * 1080 / 1000000, uclk_states[i], 32);
2664 
2665 		calculated_states[i].fabricclk_mhz = (min_fclk_required_by_uclk < min_dcfclk) ?
2666 				min_dcfclk : min_fclk_required_by_uclk;
2667 
2668 		calculated_states[i].socclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->socClockInKhz / 1000) ?
2669 				max_clocks->socClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
2670 
2671 		calculated_states[i].dcfclk_mhz = (calculated_states[i].fabricclk_mhz > max_clocks->dcfClockInKhz / 1000) ?
2672 				max_clocks->dcfClockInKhz / 1000 : calculated_states[i].fabricclk_mhz;
2673 
2674 		calculated_states[i].dispclk_mhz = max_clocks->displayClockInKhz / 1000;
2675 		calculated_states[i].dppclk_mhz = max_clocks->displayClockInKhz / 1000;
2676 		calculated_states[i].dscclk_mhz = max_clocks->displayClockInKhz / (1000 * 3);
2677 
2678 		calculated_states[i].phyclk_mhz = max_clocks->phyClockInKhz / 1000;
2679 
2680 		num_calculated_states++;
2681 	}
2682 
2683 	memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits));
2684 	bb->num_states = num_calculated_states;
2685 
2686 	// Duplicate the last state; DML always needs an extra state identical to the max state to work
2687 	memcpy(&bb->clock_limits[num_calculated_states], &bb->clock_limits[num_calculated_states - 1], sizeof(struct _vcs_dpi_voltage_scaling_st));
2688 	bb->clock_limits[num_calculated_states].state = bb->num_states;
2689 }
2690 
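/*
 * Apply any dc->bb_overrides on top of the soc bounding box latencies
 * (sr_exit, sr_enter_plus_exit, urgent and dram clock change latency).
 * Done under kernel_fpu since the bounding box stores doubles.
 */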
2691 static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
2692 {
2693 	kernel_fpu_begin();
2694 	if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
2695 			&& dc->bb_overrides.sr_exit_time_ns) {
2696 		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
2697 	}
2698 
2699 	if ((int)(bb->sr_enter_plus_exit_time_us * 1000)
2700 				!= dc->bb_overrides.sr_enter_plus_exit_time_ns
2701 			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
2702 		bb->sr_enter_plus_exit_time_us =
2703 				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
2704 	}
2705 
2706 	if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
2707 			&& dc->bb_overrides.urgent_latency_ns) {
2708 		bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
2709 	}
2710 
2711 	if ((int)(bb->dram_clock_change_latency_us * 1000)
2712 				!= dc->bb_overrides.dram_clock_change_latency_ns
2713 			&& dc->bb_overrides.dram_clock_change_latency_ns) {
2714 		bb->dram_clock_change_latency_us =
2715 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
2716 	}
2717 	kernel_fpu_end();
2718 }
2719 
2720 #define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
2721 #define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
2722 
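/*
 * Initialize the DCN 2.0 soc bounding box.  When the firmware-provided box
 * (dc->soc_bounding_box) is used, every field is converted from its
 * little-endian fixed-point representation into dcn2_0_soc.  The clock
 * states are then refined with data from pp_smu, and the ip params are
 * sized to the actual pool (number of OTGs and DPPs).
 */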
2723 static bool init_soc_bounding_box(struct dc *dc,
2724 				  struct dcn20_resource_pool *pool)
2725 {
2726 	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
2727 	DC_LOGGER_INIT(dc->ctx->logger);
2728 
2729 	if (!bb && !SOC_BOUNDING_BOX_VALID) {
2730 		DC_LOG_ERROR("%s: invalid soc bounding box\n", __func__);
2731 		return false;
2732 	}
2733 
2734 	if (bb && !SOC_BOUNDING_BOX_VALID) {
2735 		int i;
2736 
2737 		dcn2_0_soc.sr_exit_time_us =
2738 				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
2739 		dcn2_0_soc.sr_enter_plus_exit_time_us =
2740 				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
2741 		dcn2_0_soc.urgent_latency_us =
2742 				fixed16_to_double_to_cpu(bb->urgent_latency_us);
2743 		dcn2_0_soc.urgent_latency_pixel_data_only_us =
2744 				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
2745 		dcn2_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
2746 				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
2747 		dcn2_0_soc.urgent_latency_vm_data_only_us =
2748 				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
2749 		dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
2750 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
2751 		dcn2_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
2752 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
2753 		dcn2_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
2754 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
2755 		dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
2756 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
2757 		dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
2758 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
2759 		dcn2_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
2760 				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
2761 		dcn2_0_soc.max_avg_sdp_bw_use_normal_percent =
2762 				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
2763 		dcn2_0_soc.max_avg_dram_bw_use_normal_percent =
2764 				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
2765 		dcn2_0_soc.writeback_latency_us =
2766 				fixed16_to_double_to_cpu(bb->writeback_latency_us);
2767 		dcn2_0_soc.ideal_dram_bw_after_urgent_percent =
2768 				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
2769 		dcn2_0_soc.max_request_size_bytes =
2770 				le32_to_cpu(bb->max_request_size_bytes);
2771 		dcn2_0_soc.dram_channel_width_bytes =
2772 				le32_to_cpu(bb->dram_channel_width_bytes);
2773 		dcn2_0_soc.fabric_datapath_to_dcn_data_return_bytes =
2774 				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
2775 		dcn2_0_soc.dcn_downspread_percent =
2776 				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
2777 		dcn2_0_soc.downspread_percent =
2778 				fixed16_to_double_to_cpu(bb->downspread_percent);
2779 		dcn2_0_soc.dram_page_open_time_ns =
2780 				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
2781 		dcn2_0_soc.dram_rw_turnaround_time_ns =
2782 				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
2783 		dcn2_0_soc.dram_return_buffer_per_channel_bytes =
2784 				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
2785 		dcn2_0_soc.round_trip_ping_latency_dcfclk_cycles =
2786 				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
2787 		dcn2_0_soc.urgent_out_of_order_return_per_channel_bytes =
2788 				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
2789 		dcn2_0_soc.channel_interleave_bytes =
2790 				le32_to_cpu(bb->channel_interleave_bytes);
2791 		dcn2_0_soc.num_banks =
2792 				le32_to_cpu(bb->num_banks);
2793 		dcn2_0_soc.num_chans =
2794 				le32_to_cpu(bb->num_chans);
2795 		dcn2_0_soc.vmm_page_size_bytes =
2796 				le32_to_cpu(bb->vmm_page_size_bytes);
2797 		dcn2_0_soc.dram_clock_change_latency_us =
2798 				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
2799 		// HACK!! Lower uclock latency switch time so we don't switch
2800 		dcn2_0_soc.dram_clock_change_latency_us = 10;
2801 		dcn2_0_soc.writeback_dram_clock_change_latency_us =
2802 				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
2803 		dcn2_0_soc.return_bus_width_bytes =
2804 				le32_to_cpu(bb->return_bus_width_bytes);
2805 		dcn2_0_soc.dispclk_dppclk_vco_speed_mhz =
2806 				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
2807 		dcn2_0_soc.xfc_bus_transport_time_us =
2808 				le32_to_cpu(bb->xfc_bus_transport_time_us);
2809 		dcn2_0_soc.xfc_xbuf_latency_tolerance_us =
2810 				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
2811 		dcn2_0_soc.use_urgent_burst_bw =
2812 				le32_to_cpu(bb->use_urgent_burst_bw);
2813 		dcn2_0_soc.num_states =
2814 				le32_to_cpu(bb->num_states);
2815 
2816 		for (i = 0; i < dcn2_0_soc.num_states; i++) {
2817 			dcn2_0_soc.clock_limits[i].state =
2818 					le32_to_cpu(bb->clock_limits[i].state);
2819 			dcn2_0_soc.clock_limits[i].dcfclk_mhz =
2820 					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
2821 			dcn2_0_soc.clock_limits[i].fabricclk_mhz =
2822 					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
2823 			dcn2_0_soc.clock_limits[i].dispclk_mhz =
2824 					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
2825 			dcn2_0_soc.clock_limits[i].dppclk_mhz =
2826 					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
2827 			dcn2_0_soc.clock_limits[i].phyclk_mhz =
2828 					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
2829 			dcn2_0_soc.clock_limits[i].socclk_mhz =
2830 					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
2831 			dcn2_0_soc.clock_limits[i].dscclk_mhz =
2832 					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
2833 			dcn2_0_soc.clock_limits[i].dram_speed_mts =
2834 					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
2835 		}
2836 	}
2837 
2838 	if (pool->base.pp_smu) {
2839 		struct pp_smu_nv_clock_table max_clocks = {0};
2840 		unsigned int uclk_states[8] = {0};
2841 		unsigned int num_states = 0;
2842 		int i;
2843 		enum pp_smu_status status;
2844 		bool clock_limits_available = false;
2845 		bool uclk_states_available = false;
2846 
2847 		if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
2848 			status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
2849 				(&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
2850 
2851 			uclk_states_available = (status == PP_SMU_RESULT_OK);
2852 		}
2853 
2854 		if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
2855 			status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
2856 					(&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
2857 			/* SMU cannot set DCF clock to anything equal to or higher than SOC clock
2858 			 */
2859 			if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
2860 				max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
2861 			clock_limits_available = (status == PP_SMU_RESULT_OK);
2862 		}
2863 
2864 		// HACK: Use the max uclk_states value for all elements.
2865 		for (i = 0; i < num_states; i++)
2866 			uclk_states[i] = uclk_states[num_states - 1];
2867 
2868 		if (clock_limits_available && uclk_states_available && num_states)
2869 			update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states);
2870 		else if (clock_limits_available)
2871 			cap_soc_clocks(&dcn2_0_soc, max_clocks);
2872 	}
2873 
2874 	dcn2_0_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
2875 	dcn2_0_ip.max_num_dpp = pool->base.pipe_count;
2876 	patch_bounding_box(dc, &dcn2_0_soc);
2877 
2878 	return true;
2879 }
2880 
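/*
 * Construct the DCN 2.0 resource pool: pick per-ASIC caps (Navi14 vs
 * Navi10), create clock sources and all HW block objects (HUBP, DPP, OPP,
 * timing generators, MPC, HUBBUB, DSC, DWB/MCIF_WB, AUX/I2C engines),
 * initialize the soc bounding box and DML, and report watermark ranges to
 * pp_smu where available.
 */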
2881 static bool construct(
2882 	uint8_t num_virtual_links,
2883 	struct dc *dc,
2884 	struct dcn20_resource_pool *pool)
2885 {
2886 	int i;
2887 	struct dc_context *ctx = dc->ctx;
2888 	struct irq_service_init_data init_data;
2889 
2890 	ctx->dc_bios->regs = &bios_regs;
2891 	pool->base.funcs = &dcn20_res_pool_funcs;
2892 
2893 	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
2894 		pool->base.res_cap = &res_cap_nv14;
2895 		pool->base.pipe_count = 5;
2896 		pool->base.mpcc_count = 5;
2897 	} else {
2898 		pool->base.res_cap = &res_cap_nv10;
2899 		pool->base.pipe_count = 6;
2900 		pool->base.mpcc_count = 6;
2901 	}
2902 	/*************************************************
2903 	 *  Resource + asic cap hardcoding               *
2904 	 *************************************************/
2905 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
2906 
2907 	dc->caps.max_downscale_ratio = 200;
2908 	dc->caps.i2c_speed_in_khz = 100;
2909 	dc->caps.max_cursor_size = 256;
2910 	dc->caps.dmdata_alloc_size = 2048;
2911 
2912 	dc->caps.max_slave_planes = 1;
2913 	dc->caps.post_blend_color_processing = true;
2914 	dc->caps.force_dp_tps4_for_cp2520 = true;
2915 	dc->caps.hw_3d_lut = true;
2916 
2917 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
2918 		dc->debug = debug_defaults_drv;
2919 	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
2920 		pool->base.pipe_count = 4;
2921 		pool->base.mpcc_count = pool->base.pipe_count;
2922 		dc->debug = debug_defaults_diags;
2923 	} else {
2924 		dc->debug = debug_defaults_diags;
2925 	}
2926 	//dcn2.0x
2927 	dc->work_arounds.dedcn20_305_wa = true;
2928 
2929 	// Init the vm_helper
2930 	if (dc->vm_helper)
2931 		vm_helper_init(dc->vm_helper, 16);
2932 
2933 	/*************************************************
2934 	 *  Create resources                             *
2935 	 *************************************************/
2936 
2937 	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
2938 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2939 				CLOCK_SOURCE_COMBO_PHY_PLL0,
2940 				&clk_src_regs[0], false);
2941 	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
2942 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2943 				CLOCK_SOURCE_COMBO_PHY_PLL1,
2944 				&clk_src_regs[1], false);
2945 	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
2946 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2947 				CLOCK_SOURCE_COMBO_PHY_PLL2,
2948 				&clk_src_regs[2], false);
2949 	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
2950 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2951 				CLOCK_SOURCE_COMBO_PHY_PLL3,
2952 				&clk_src_regs[3], false);
2953 	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
2954 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2955 				CLOCK_SOURCE_COMBO_PHY_PLL4,
2956 				&clk_src_regs[4], false);
2957 	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
2958 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2959 				CLOCK_SOURCE_COMBO_PHY_PLL5,
2960 				&clk_src_regs[5], false);
2961 	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
2962 	/* todo: do not reuse phy_pll registers */
2963 	pool->base.dp_clock_source =
2964 			dcn20_clock_source_create(ctx, ctx->dc_bios,
2965 				CLOCK_SOURCE_ID_DP_DTO,
2966 				&clk_src_regs[0], true);
2967 
2968 	for (i = 0; i < pool->base.clk_src_count; i++) {
2969 		if (pool->base.clock_sources[i] == NULL) {
2970 			dm_error("DC: failed to create clock sources!\n");
2971 			BREAK_TO_DEBUGGER();
2972 			goto create_fail;
2973 		}
2974 	}
2975 
2976 	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
2977 	if (pool->base.dccg == NULL) {
2978 		dm_error("DC: failed to create dccg!\n");
2979 		BREAK_TO_DEBUGGER();
2980 		goto create_fail;
2981 	}
2982 
2983 	pool->base.dmcu = dcn20_dmcu_create(ctx,
2984 			&dmcu_regs,
2985 			&dmcu_shift,
2986 			&dmcu_mask);
2987 	if (pool->base.dmcu == NULL) {
2988 		dm_error("DC: failed to create dmcu!\n");
2989 		BREAK_TO_DEBUGGER();
2990 		goto create_fail;
2991 	}
2992 
2993 	pool->base.abm = dce_abm_create(ctx,
2994 			&abm_regs,
2995 			&abm_shift,
2996 			&abm_mask);
2997 	if (pool->base.abm == NULL) {
2998 		dm_error("DC: failed to create abm!\n");
2999 		BREAK_TO_DEBUGGER();
3000 		goto create_fail;
3001 	}
3002 
3003 	pool->base.pp_smu = dcn20_pp_smu_create(ctx);
3004 
3005 
3006 	if (!init_soc_bounding_box(dc, pool)) {
3007 		dm_error("DC: failed to initialize soc bounding box!\n");
3008 		BREAK_TO_DEBUGGER();
3009 		goto create_fail;
3010 	}
3011 
3012 	dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10);
3013 
3014 	if (!dc->debug.disable_pplib_wm_range) {
3015 		struct pp_smu_wm_range_sets ranges = {0};
3016 		int i = 0;
3017 
3018 		ranges.num_reader_wm_sets = 0;
3019 
3020 		if (dcn2_0_soc.num_states == 1) {
3021 			ranges.reader_wm_sets[0].wm_inst = i;
3022 			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3023 			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3024 			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3025 			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3026 
3027 			ranges.num_reader_wm_sets = 1;
3028 		} else if (dcn2_0_soc.num_states > 1) {
3029 			for (i = 0; i < 4 && i < dcn2_0_soc.num_states; i++) {
3030 				ranges.reader_wm_sets[i].wm_inst = i;
3031 				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3032 				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3033 				ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (dcn2_0_soc.clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
3034 				ranges.reader_wm_sets[i].max_fill_clk_mhz = dcn2_0_soc.clock_limits[i].dram_speed_mts / 16;
3035 
3036 				ranges.num_reader_wm_sets = i + 1;
3037 			}
3038 
3039 			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3040 			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3041 		}
3042 
3043 		ranges.num_writer_wm_sets = 1;
3044 
3045 		ranges.writer_wm_sets[0].wm_inst = 0;
3046 		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3047 		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3048 		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
3049 		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
3050 
3051 		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
3052 		if (pool->base.pp_smu->nv_funcs.set_wm_ranges)
3053 			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
3054 	}
3055 
3056 	init_data.ctx = dc->ctx;
3057 	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
3058 	if (!pool->base.irqs)
3059 		goto create_fail;
3060 
3061 	/* mem input -> ipp -> dpp -> opp -> TG */
3062 	for (i = 0; i < pool->base.pipe_count; i++) {
3063 		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
3064 		if (pool->base.hubps[i] == NULL) {
3065 			BREAK_TO_DEBUGGER();
3066 			dm_error(
3067 				"DC: failed to create memory input!\n");
3068 			goto create_fail;
3069 		}
3070 
3071 		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
3072 		if (pool->base.ipps[i] == NULL) {
3073 			BREAK_TO_DEBUGGER();
3074 			dm_error(
3075 				"DC: failed to create input pixel processor!\n");
3076 			goto create_fail;
3077 		}
3078 
3079 		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
3080 		if (pool->base.dpps[i] == NULL) {
3081 			BREAK_TO_DEBUGGER();
3082 			dm_error(
3083 				"DC: failed to create dpps!\n");
3084 			goto create_fail;
3085 		}
3086 	}
3087 	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
3088 		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
3089 		if (pool->base.engines[i] == NULL) {
3090 			BREAK_TO_DEBUGGER();
3091 			dm_error(
3092 				"DC:failed to create aux engine!!\n");
3093 			goto create_fail;
3094 		}
3095 		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
3096 		if (pool->base.hw_i2cs[i] == NULL) {
3097 			BREAK_TO_DEBUGGER();
3098 			dm_error(
3099 				"DC:failed to create hw i2c!!\n");
3100 			goto create_fail;
3101 		}
3102 		pool->base.sw_i2cs[i] = NULL;
3103 	}
3104 
3105 	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
3106 		pool->base.opps[i] = dcn20_opp_create(ctx, i);
3107 		if (pool->base.opps[i] == NULL) {
3108 			BREAK_TO_DEBUGGER();
3109 			dm_error(
3110 				"DC: failed to create output pixel processor!\n");
3111 			goto create_fail;
3112 		}
3113 	}
3114 
3115 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
3116 		pool->base.timing_generators[i] = dcn20_timing_generator_create(
3117 				ctx, i);
3118 		if (pool->base.timing_generators[i] == NULL) {
3119 			BREAK_TO_DEBUGGER();
3120 			dm_error("DC: failed to create tg!\n");
3121 			goto create_fail;
3122 		}
3123 	}
3124 
3125 	pool->base.timing_generator_count = i;
3126 
3127 	pool->base.mpc = dcn20_mpc_create(ctx);
3128 	if (pool->base.mpc == NULL) {
3129 		BREAK_TO_DEBUGGER();
3130 		dm_error("DC: failed to create mpc!\n");
3131 		goto create_fail;
3132 	}
3133 
3134 	pool->base.hubbub = dcn20_hubbub_create(ctx);
3135 	if (pool->base.hubbub == NULL) {
3136 		BREAK_TO_DEBUGGER();
3137 		dm_error("DC: failed to create hubbub!\n");
3138 		goto create_fail;
3139 	}
3140 
3141 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3142 	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
3143 		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
3144 		if (pool->base.dscs[i] == NULL) {
3145 			BREAK_TO_DEBUGGER();
3146 			dm_error("DC: failed to create display stream compressor %d!\n", i);
3147 			goto create_fail;
3148 		}
3149 	}
3150 #endif
3151 
3152 	if (!dcn20_dwbc_create(ctx, &pool->base)) {
3153 		BREAK_TO_DEBUGGER();
3154 		dm_error("DC: failed to create dwbc!\n");
3155 		goto create_fail;
3156 	}
3157 	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
3158 		BREAK_TO_DEBUGGER();
3159 		dm_error("DC: failed to create mcif_wb!\n");
3160 		goto create_fail;
3161 	}
3162 
3163 	if (!resource_construct(num_virtual_links, dc, &pool->base,
3164 			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
3165 			&res_create_funcs : &res_create_maximus_funcs)))
3166 			goto create_fail;
3167 
3168 	dcn20_hw_sequencer_construct(dc);
3169 
3170 	dc->caps.max_planes =  pool->base.pipe_count;
3171 
3172 	for (i = 0; i < dc->caps.max_planes; ++i)
3173 		dc->caps.planes[i] = plane_cap;
3174 
3175 	dc->cap_funcs = cap_funcs;
3176 
3177 	return true;
3178 
3179 create_fail:
3180 
3181 	destruct(pool);
3182 
3183 	return false;
3184 }
3185 
3186 struct resource_pool *dcn20_create_resource_pool(
3187 		const struct dc_init_data *init_data,
3188 		struct dc *dc)
3189 {
3190 	struct dcn20_resource_pool *pool =
3191 		kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
3192 
3193 	if (!pool)
3194 		return NULL;
3195 
3196 	if (construct(init_data->num_virtual_links, dc, pool))
3197 		return &pool->base;
3198 
3199 	BREAK_TO_DEBUGGER();
3200 	kfree(pool);
3201 	return NULL;
3202 }
3203