1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "dm_services.h"
27 #include "dc.h"
28 
29 #include "resource.h"
30 #include "include/irq_service_interface.h"
31 #include "dcn10/dcn10_resource.h"
32 
33 #include "dcn10/dcn10_ipp.h"
34 #include "dcn10/dcn10_mpc.h"
35 #include "irq/dcn10/irq_service_dcn10.h"
36 #include "dcn10/dcn10_dpp.h"
37 #include "dcn10/dcn10_timing_generator.h"
38 #include "dcn10/dcn10_hw_sequencer.h"
39 #include "dce110/dce110_hw_sequencer.h"
40 #include "dcn10/dcn10_opp.h"
41 #include "dce/dce_link_encoder.h"
42 #include "dce/dce_stream_encoder.h"
43 #include "dce/dce_clocks.h"
44 #include "dce/dce_clock_source.h"
45 #include "dcn10/dcn10_mem_input.h"
46 #include "dce/dce_audio.h"
47 #include "dce/dce_hwseq.h"
48 #include "../virtual/virtual_stream_encoder.h"
49 #include "dce110/dce110_resource.h"
50 #include "dce112/dce112_resource.h"
51 
52 #include "vega10/soc15ip.h"
53 
54 #include "raven1/DCN/dcn_1_0_offset.h"
55 #include "raven1/DCN/dcn_1_0_sh_mask.h"
56 
57 #include "raven1/NBIO/nbio_7_0_offset.h"
58 
59 #include "raven1/MMHUB/mmhub_9_1_offset.h"
60 #include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
61 
62 #include "reg_helper.h"
63 #include "dce/dce_abm.h"
64 #include "dce/dce_dmcu.h"
65 
66 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
67 	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
68 	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
69 	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
70 	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
71 	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
72 	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
73 	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
74 	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
75 	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
76 	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
77 	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
78 	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
79 	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
80 	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
81 #endif
82 
83 
84 enum dcn10_clk_src_array_id {
85 	DCN10_CLK_SRC_PLL0,
86 	DCN10_CLK_SRC_PLL1,
87 	DCN10_CLK_SRC_PLL2,
88 	DCN10_CLK_SRC_PLL3,
89 	DCN10_CLK_SRC_TOTAL
90 };
91 
/* begin *********************
 * macros to expand the register list macros defined in the HW object header files */
94 
95 /* DCN */
96 #define BASE_INNER(seg) \
97 	DCE_BASE__INST0_SEG ## seg
98 
99 #define BASE(seg) \
100 	BASE_INNER(seg)
101 
102 #define SR(reg_name)\
103 		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) +  \
104 					mm ## reg_name
105 
106 #define SRI(reg_name, block, id)\
107 	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
108 					mm ## block ## id ## _ ## reg_name
109 
110 
111 #define SRII(reg_name, block, id)\
112 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
113 					mm ## block ## id ## _ ## reg_name
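
/*
 * For reference, SRI(DP_DPHY_INTERNAL_CTRL, DP, 0), as used in the
 * link_regs() list below, should expand roughly to:
 *
 *	.DP_DPHY_INTERNAL_CTRL = DCE_BASE__INST0_SEG2
 *				+ mmDP0_DP_DPHY_INTERNAL_CTRL
 *
 * i.e. each register offset is the per-segment base address (selected by
 * the *_BASE_IDX define) plus the register's own mm offset.
 */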
114 
115 /* NBIO */
116 #define NBIO_BASE_INNER(seg) \
117 	NBIF_BASE__INST0_SEG ## seg
118 
119 #define NBIO_BASE(seg) \
120 	NBIO_BASE_INNER(seg)
121 
122 #define NBIO_SR(reg_name)\
123 		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) +  \
124 					mm ## reg_name
125 
126 /* MMHUB */
127 #define MMHUB_BASE_INNER(seg) \
128 	MMHUB_BASE__INST0_SEG ## seg
129 
130 #define MMHUB_BASE(seg) \
131 	MMHUB_BASE_INNER(seg)
132 
133 #define MMHUB_SR(reg_name)\
134 		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
135 					mm ## reg_name
136 
/* macros to expand the register list macros defined in the HW object header files
 * end *********************/
139 
140 
141 static const struct dce_dmcu_registers dmcu_regs = {
142 		DMCU_DCN10_REG_LIST()
143 };
144 
145 static const struct dce_dmcu_shift dmcu_shift = {
146 		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
147 };
148 
149 static const struct dce_dmcu_mask dmcu_mask = {
150 		DMCU_MASK_SH_LIST_DCN10(_MASK)
151 };
152 
153 static const struct dce_abm_registers abm_regs = {
154 		ABM_DCN10_REG_LIST(0)
155 };
156 
157 static const struct dce_abm_shift abm_shift = {
158 		ABM_MASK_SH_LIST_DCN10(__SHIFT)
159 };
160 
161 static const struct dce_abm_mask abm_mask = {
162 		ABM_MASK_SH_LIST_DCN10(_MASK)
163 };
164 
165 #define stream_enc_regs(id)\
166 [id] = {\
167 	SE_DCN_REG_LIST(id),\
168 	.TMDS_CNTL = 0,\
169 	.AFMT_AVI_INFO0 = 0,\
170 	.AFMT_AVI_INFO1 = 0,\
171 	.AFMT_AVI_INFO2 = 0,\
172 	.AFMT_AVI_INFO3 = 0,\
173 }
174 
175 static const struct dce110_stream_enc_registers stream_enc_regs[] = {
176 	stream_enc_regs(0),
177 	stream_enc_regs(1),
178 	stream_enc_regs(2),
179 	stream_enc_regs(3),
180 };
181 
182 static const struct dce_stream_encoder_shift se_shift = {
183 		SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
184 };
185 
186 static const struct dce_stream_encoder_mask se_mask = {
187 		SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
188 		.AFMT_GENERIC0_UPDATE = 0,
189 		.AFMT_GENERIC2_UPDATE = 0,
190 		.DP_DYN_RANGE = 0,
191 		.DP_YCBCR_RANGE = 0,
192 		.HDMI_AVI_INFO_SEND = 0,
193 		.HDMI_AVI_INFO_CONT = 0,
194 		.HDMI_AVI_INFO_LINE = 0,
195 		.DP_SEC_AVI_ENABLE = 0,
196 		.AFMT_AVI_INFO_VERSION = 0
197 };
198 
199 #define audio_regs(id)\
200 [id] = {\
201 		AUD_COMMON_REG_LIST(id)\
202 }
203 
204 static const struct dce_audio_registers audio_regs[] = {
205 	audio_regs(0),
206 	audio_regs(1),
207 	audio_regs(2),
208 	audio_regs(3),
209 };
210 
211 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
212 		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
213 		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
214 		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
215 
216 static const struct dce_audio_shift audio_shift = {
217 		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
218 };
219 
220 static const struct dce_aduio_mask audio_mask = {
221 		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
222 };
223 
224 #define aux_regs(id)\
225 [id] = {\
226 	AUX_REG_LIST(id)\
227 }
228 
229 static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
230 		aux_regs(0),
231 		aux_regs(1),
232 		aux_regs(2),
233 		aux_regs(3),
234 		aux_regs(4),
235 		aux_regs(5)
236 };
237 
238 #define hpd_regs(id)\
239 [id] = {\
240 	HPD_REG_LIST(id)\
241 }
242 
243 static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
244 		hpd_regs(0),
245 		hpd_regs(1),
246 		hpd_regs(2),
247 		hpd_regs(3),
248 		hpd_regs(4),
249 		hpd_regs(5)
250 };
251 
252 #define link_regs(id)\
253 [id] = {\
254 	LE_DCN10_REG_LIST(id), \
255 	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
256 }
257 
258 static const struct dce110_link_enc_registers link_enc_regs[] = {
259 	link_regs(0),
260 	link_regs(1),
261 	link_regs(2),
262 	link_regs(3),
263 	link_regs(4),
264 	link_regs(5),
265 	link_regs(6),
266 };
267 
268 #define ipp_regs(id)\
269 [id] = {\
270 	IPP_REG_LIST_DCN10(id),\
271 }
272 
273 static const struct dcn10_ipp_registers ipp_regs[] = {
274 	ipp_regs(0),
275 	ipp_regs(1),
276 	ipp_regs(2),
277 	ipp_regs(3),
278 };
279 
280 static const struct dcn10_ipp_shift ipp_shift = {
281 		IPP_MASK_SH_LIST_DCN10(__SHIFT)
282 };
283 
284 static const struct dcn10_ipp_mask ipp_mask = {
285 		IPP_MASK_SH_LIST_DCN10(_MASK),
286 };
287 
288 #define opp_regs(id)\
289 [id] = {\
290 	OPP_REG_LIST_DCN10(id),\
291 }
292 
293 static const struct dcn10_opp_registers opp_regs[] = {
294 	opp_regs(0),
295 	opp_regs(1),
296 	opp_regs(2),
297 	opp_regs(3),
298 };
299 
300 static const struct dcn10_opp_shift opp_shift = {
301 		OPP_MASK_SH_LIST_DCN10(__SHIFT)
302 };
303 
304 static const struct dcn10_opp_mask opp_mask = {
305 		OPP_MASK_SH_LIST_DCN10(_MASK),
306 };
307 
308 #define tf_regs(id)\
309 [id] = {\
310 	TF_REG_LIST_DCN10(id),\
311 }
312 
313 static const struct dcn_dpp_registers tf_regs[] = {
314 	tf_regs(0),
315 	tf_regs(1),
316 	tf_regs(2),
317 	tf_regs(3),
318 };
319 
320 static const struct dcn_dpp_shift tf_shift = {
321 	TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
322 };
323 
324 static const struct dcn_dpp_mask tf_mask = {
325 	TF_REG_LIST_SH_MASK_DCN10(_MASK),
326 };
327 
328 static const struct dcn_mpc_registers mpc_regs = {
329 		MPC_COMMON_REG_LIST_DCN1_0(0),
330 		MPC_COMMON_REG_LIST_DCN1_0(1),
331 		MPC_COMMON_REG_LIST_DCN1_0(2),
332 		MPC_COMMON_REG_LIST_DCN1_0(3)
333 };
334 
335 static const struct dcn_mpc_shift mpc_shift = {
336 	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
337 };
338 
339 static const struct dcn_mpc_mask mpc_mask = {
340 	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
341 };
342 
343 #define tg_regs(id)\
344 [id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
345 
346 static const struct dcn_tg_registers tg_regs[] = {
347 	tg_regs(0),
348 	tg_regs(1),
349 	tg_regs(2),
350 	tg_regs(3),
351 };
352 
353 static const struct dcn_tg_shift tg_shift = {
354 	TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
355 };
356 
357 static const struct dcn_tg_mask tg_mask = {
358 	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
359 };
360 
361 
362 static const struct bios_registers bios_regs = {
363 		NBIO_SR(BIOS_SCRATCH_6)
364 };
365 
366 #define mi_regs(id)\
367 [id] = {\
368 	MI_REG_LIST_DCN10(id)\
369 }
370 
371 
372 static const struct dcn_mi_registers mi_regs[] = {
373 	mi_regs(0),
374 	mi_regs(1),
375 	mi_regs(2),
376 	mi_regs(3),
377 };
378 
379 static const struct dcn_mi_shift mi_shift = {
380 		MI_MASK_SH_LIST_DCN10(__SHIFT)
381 };
382 
383 static const struct dcn_mi_mask mi_mask = {
384 		MI_MASK_SH_LIST_DCN10(_MASK)
385 };
386 
387 #define clk_src_regs(index, pllid)\
388 [index] = {\
389 	CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
390 }
391 
392 static const struct dce110_clk_src_regs clk_src_regs[] = {
393 	clk_src_regs(0, A),
394 	clk_src_regs(1, B),
395 	clk_src_regs(2, C),
396 	clk_src_regs(3, D)
397 };
398 
399 static const struct dce110_clk_src_shift cs_shift = {
400 		CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
401 };
402 
403 static const struct dce110_clk_src_mask cs_mask = {
404 		CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
405 };
406 
407 
408 static const struct resource_caps res_cap = {
409 		.num_timing_generator = 4,
410 		.num_video_plane = 4,
411 		.num_audio = 4,
412 		.num_stream_encoder = 4,
413 		.num_pll = 4,
414 };
415 
416 static const struct dc_debug debug_defaults_drv = {
417 		.disable_dcc = false,
418 		.sanity_checks = true,
419 		.disable_dmcu = true,
420 		.force_abm_enable = false,
421 		.timing_trace = false,
422 		.clock_trace = true,
		/* the spreadsheet doesn't handle taps_c == 1 properly, so the
		 * scaler needs to be enabled for video surfaces to pass
		 * bandwidth validation.
		 */
426 		.always_scale = true,
427 		.disable_pplib_clock_request = true,
428 		.disable_pplib_wm_range = false,
429 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
430 		.use_dml_wm = false,
431 		.disable_pipe_split = true
432 #endif
433 };
434 
435 static const struct dc_debug debug_defaults_diags = {
436 		.disable_dmcu = true,
437 		.force_abm_enable = false,
438 		.timing_trace = true,
439 		.clock_trace = true,
440 		.disable_stutter = true,
441 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
442 		.disable_pplib_clock_request = true,
443 		.disable_pplib_wm_range = true,
444 		.use_dml_wm = false,
445 		.disable_pipe_split = false
446 #endif
447 };
448 
449 static void dcn10_dpp_destroy(struct transform **xfm)
450 {
451 	kfree(TO_DCN10_DPP(*xfm));
452 	*xfm = NULL;
453 }
454 
455 static struct transform *dcn10_dpp_create(
456 	struct dc_context *ctx,
457 	uint32_t inst)
458 {
459 	struct dcn10_dpp *dpp =
460 		kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
461 
462 	if (!dpp)
463 		return NULL;
464 
465 	dcn10_dpp_construct(dpp, ctx, inst,
466 			    &tf_regs[inst], &tf_shift, &tf_mask);
467 	return &dpp->base;
468 }
469 
470 static struct input_pixel_processor *dcn10_ipp_create(
471 	struct dc_context *ctx, uint32_t inst)
472 {
473 	struct dcn10_ipp *ipp =
474 		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
475 
476 	if (!ipp) {
477 		BREAK_TO_DEBUGGER();
478 		return NULL;
479 	}
480 
481 	dcn10_ipp_construct(ipp, ctx, inst,
482 			&ipp_regs[inst], &ipp_shift, &ipp_mask);
483 	return &ipp->base;
484 }
485 
486 
487 static struct output_pixel_processor *dcn10_opp_create(
488 	struct dc_context *ctx, uint32_t inst)
489 {
490 	struct dcn10_opp *opp =
491 		kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
492 
493 	if (!opp) {
494 		BREAK_TO_DEBUGGER();
495 		return NULL;
496 	}
497 
498 	dcn10_opp_construct(opp, ctx, inst,
499 			&opp_regs[inst], &opp_shift, &opp_mask);
500 	return &opp->base;
501 }
502 
503 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
504 {
505 	struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
506 					  GFP_KERNEL);
507 
508 	if (!mpc10)
509 		return NULL;
510 
511 	dcn10_mpc_construct(mpc10, ctx,
512 			&mpc_regs,
513 			&mpc_shift,
514 			&mpc_mask,
515 			4);
516 
517 	return &mpc10->base;
518 }
519 
520 static struct timing_generator *dcn10_timing_generator_create(
521 		struct dc_context *ctx,
522 		uint32_t instance)
523 {
524 	struct dcn10_timing_generator *tgn10 =
525 		kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
526 
527 	if (!tgn10)
528 		return NULL;
529 
530 	tgn10->base.inst = instance;
531 	tgn10->base.ctx = ctx;
532 
533 	tgn10->tg_regs = &tg_regs[instance];
534 	tgn10->tg_shift = &tg_shift;
535 	tgn10->tg_mask = &tg_mask;
536 
537 	dcn10_timing_generator_init(tgn10);
538 
539 	return &tgn10->base;
540 }
541 
542 static const struct encoder_feature_support link_enc_feature = {
543 		.max_hdmi_deep_color = COLOR_DEPTH_121212,
544 		.max_hdmi_pixel_clock = 600000,
545 		.ycbcr420_supported = true,
546 		.flags.bits.IS_HBR2_CAPABLE = true,
547 		.flags.bits.IS_HBR3_CAPABLE = true,
548 		.flags.bits.IS_TPS3_CAPABLE = true,
549 		.flags.bits.IS_TPS4_CAPABLE = true,
550 		.flags.bits.IS_YCBCR_CAPABLE = true
551 };
552 
553 struct link_encoder *dcn10_link_encoder_create(
554 	const struct encoder_init_data *enc_init_data)
555 {
556 	struct dce110_link_encoder *enc110 =
557 		kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
558 
559 	if (!enc110)
560 		return NULL;
561 
562 	dce110_link_encoder_construct(enc110,
563 				      enc_init_data,
564 				      &link_enc_feature,
565 				      &link_enc_regs[enc_init_data->transmitter],
566 				      &link_enc_aux_regs[enc_init_data->channel - 1],
567 				      &link_enc_hpd_regs[enc_init_data->hpd_source]);
568 
569 	return &enc110->base;
570 }
571 
572 struct clock_source *dcn10_clock_source_create(
573 	struct dc_context *ctx,
574 	struct dc_bios *bios,
575 	enum clock_source_id id,
576 	const struct dce110_clk_src_regs *regs,
577 	bool dp_clk_src)
578 {
579 	struct dce110_clk_src *clk_src =
580 		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
581 
582 	if (!clk_src)
583 		return NULL;
584 
585 	if (dce110_clk_src_construct(clk_src, ctx, bios, id,
586 			regs, &cs_shift, &cs_mask)) {
587 		clk_src->base.dp_clk_src = dp_clk_src;
588 		return &clk_src->base;
589 	}
590 
591 	BREAK_TO_DEBUGGER();
592 	return NULL;
593 }
594 
595 static void read_dce_straps(
596 	struct dc_context *ctx,
597 	struct resource_straps *straps)
598 {
599 	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
600 		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
601 }
602 
603 static struct audio *create_audio(
604 		struct dc_context *ctx, unsigned int inst)
605 {
606 	return dce_audio_create(ctx, inst,
607 			&audio_regs[inst], &audio_shift, &audio_mask);
608 }
609 
610 static struct stream_encoder *dcn10_stream_encoder_create(
611 	enum engine_id eng_id,
612 	struct dc_context *ctx)
613 {
614 	struct dce110_stream_encoder *enc110 =
615 		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
616 
617 	if (!enc110)
618 		return NULL;
619 
620 	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
621 					&stream_enc_regs[eng_id],
622 					&se_shift, &se_mask);
623 	return &enc110->base;
624 }
625 
626 static const struct dce_hwseq_registers hwseq_reg = {
627 		HWSEQ_DCN1_REG_LIST()
628 };
629 
630 static const struct dce_hwseq_shift hwseq_shift = {
631 		HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
632 };
633 
634 static const struct dce_hwseq_mask hwseq_mask = {
635 		HWSEQ_DCN1_MASK_SH_LIST(_MASK)
636 };
637 
638 static struct dce_hwseq *dcn10_hwseq_create(
639 	struct dc_context *ctx)
640 {
641 	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
642 
643 	if (hws) {
644 		hws->ctx = ctx;
645 		hws->regs = &hwseq_reg;
646 		hws->shifts = &hwseq_shift;
647 		hws->masks = &hwseq_mask;
648 	}
649 	return hws;
650 }
651 
652 static const struct resource_create_funcs res_create_funcs = {
653 	.read_dce_straps = read_dce_straps,
654 	.create_audio = create_audio,
655 	.create_stream_encoder = dcn10_stream_encoder_create,
656 	.create_hwseq = dcn10_hwseq_create,
657 };
658 
659 static const struct resource_create_funcs res_create_maximus_funcs = {
660 	.read_dce_straps = NULL,
661 	.create_audio = NULL,
662 	.create_stream_encoder = NULL,
663 	.create_hwseq = dcn10_hwseq_create,
664 };
665 
666 void dcn10_clock_source_destroy(struct clock_source **clk_src)
667 {
668 	kfree(TO_DCE110_CLK_SRC(*clk_src));
669 	*clk_src = NULL;
670 }
671 
672 static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
673 {
674 	struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
675 
676 	if (!pp_smu)
677 		return pp_smu;
678 
679 	dm_pp_get_funcs_rv(ctx, pp_smu);
680 	return pp_smu;
681 }
682 
683 static void destruct(struct dcn10_resource_pool *pool)
684 {
685 	unsigned int i;
686 
687 	for (i = 0; i < pool->base.stream_enc_count; i++) {
688 		if (pool->base.stream_enc[i] != NULL) {
689 			/* TODO: free dcn version of stream encoder once implemented
690 			 * rather than using virtual stream encoder
691 			 */
692 			kfree(pool->base.stream_enc[i]);
693 			pool->base.stream_enc[i] = NULL;
694 		}
695 	}
696 
697 	if (pool->base.mpc != NULL) {
698 		kfree(TO_DCN10_MPC(pool->base.mpc));
699 		pool->base.mpc = NULL;
700 	}
701 	for (i = 0; i < pool->base.pipe_count; i++) {
702 		if (pool->base.opps[i] != NULL)
703 			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
704 
705 		if (pool->base.transforms[i] != NULL)
706 			dcn10_dpp_destroy(&pool->base.transforms[i]);
707 
708 		if (pool->base.ipps[i] != NULL)
709 			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
710 
711 		if (pool->base.mis[i] != NULL) {
712 			kfree(TO_DCN10_MEM_INPUT(pool->base.mis[i]));
713 			pool->base.mis[i] = NULL;
714 		}
715 
		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	/* the IRQ service is a pool-level resource; destroy it once, not per pipe */
	if (pool->base.irqs != NULL)
		dal_irq_service_destroy(&pool->base.irqs);
725 
726 	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL)
			kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
729 	}
730 
731 	for (i = 0; i < pool->base.audio_count; i++) {
732 		if (pool->base.audios[i])
733 			dce_aud_destroy(&pool->base.audios[i]);
734 	}
735 
736 	for (i = 0; i < pool->base.clk_src_count; i++) {
737 		if (pool->base.clock_sources[i] != NULL) {
738 			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
739 			pool->base.clock_sources[i] = NULL;
740 		}
741 	}
742 
743 	if (pool->base.dp_clock_source != NULL) {
744 		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
745 		pool->base.dp_clock_source = NULL;
746 	}
747 
748 	if (pool->base.abm != NULL)
749 		dce_abm_destroy(&pool->base.abm);
750 
751 	if (pool->base.dmcu != NULL)
752 		dce_dmcu_destroy(&pool->base.dmcu);
753 
754 	if (pool->base.display_clock != NULL)
755 		dce_disp_clk_destroy(&pool->base.display_clock);
756 
757 	kfree(pool->base.pp_smu);
758 }
759 
760 static struct mem_input *dcn10_mem_input_create(
761 	struct dc_context *ctx,
762 	uint32_t inst)
763 {
764 	struct dcn10_mem_input *mem_inputn10 =
765 		kzalloc(sizeof(struct dcn10_mem_input), GFP_KERNEL);
766 
767 	if (!mem_inputn10)
768 		return NULL;
769 
770 	dcn10_mem_input_construct(mem_inputn10, ctx, inst,
771 				  &mi_regs[inst], &mi_shift, &mi_mask);
772 	return &mem_inputn10->base;
773 }
774 
775 static void get_pixel_clock_parameters(
776 	const struct pipe_ctx *pipe_ctx,
777 	struct pixel_clk_params *pixel_clk_params)
778 {
779 	const struct dc_stream_state *stream = pipe_ctx->stream;
780 	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
781 	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
782 	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
783 	pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
784 	/* TODO: un-hardcode*/
785 	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
786 		LINK_RATE_REF_FREQ_IN_KHZ;
787 	pixel_clk_params->flags.ENABLE_SS = 0;
788 	pixel_clk_params->color_depth =
789 		stream->timing.display_color_depth;
790 	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
791 	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
792 
793 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
794 		pixel_clk_params->color_depth = COLOR_DEPTH_888;
795 
796 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
797 		pixel_clk_params->requested_pix_clk  /= 2;
798 
799 }
800 
801 static void build_clamping_params(struct dc_stream_state *stream)
802 {
803 	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
804 	stream->clamping.c_depth = stream->timing.display_color_depth;
805 	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
806 }
807 
808 static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
809 {
810 
811 	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
812 
813 	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
814 		pipe_ctx->clock_source,
815 		&pipe_ctx->stream_res.pix_clk_params,
816 		&pipe_ctx->pll_settings);
817 
818 	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
819 
820 	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
821 					&pipe_ctx->stream->bit_depth_params);
822 	build_clamping_params(pipe_ctx->stream);
823 }
824 
825 static enum dc_status build_mapped_resource(
826 		const struct dc *dc,
827 		struct dc_state *context,
828 		struct dc_stream_state *stream)
829 {
830 	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
831 
	/* TODO: this seems to be unneeded now */
833 	/*	if (old_context && resource_is_stream_unchanged(old_context, stream)) {
834 			if (stream != NULL && old_context->streams[i] != NULL) {
835 				 todo: shouldn't have to copy missing parameter here
836 				resource_build_bit_depth_reduction_params(stream,
837 						&stream->bit_depth_params);
838 				stream->clamping.pixel_encoding =
839 						stream->timing.pixel_encoding;
840 
841 				resource_build_bit_depth_reduction_params(stream,
842 								&stream->bit_depth_params);
843 				build_clamping_params(stream);
844 
845 				continue;
846 			}
847 		}
848 	*/
849 
850 	if (!pipe_ctx)
851 		return DC_ERROR_UNEXPECTED;
852 
853 	build_pipe_hw_param(pipe_ctx);
854 	return DC_OK;
855 }
856 
857 enum dc_status dcn10_add_stream_to_ctx(
858 		struct dc *dc,
859 		struct dc_state *new_ctx,
860 		struct dc_stream_state *dc_stream)
861 {
862 	enum dc_status result = DC_ERROR_UNEXPECTED;
863 
864 	result = resource_map_pool_resources(dc, new_ctx, dc_stream);
865 
866 	if (result == DC_OK)
867 		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
868 
869 
870 	if (result == DC_OK)
871 		result = build_mapped_resource(dc, new_ctx, dc_stream);
872 
873 	return result;
874 }
875 
876 enum dc_status dcn10_validate_guaranteed(
877 		struct dc *dc,
878 		struct dc_stream_state *dc_stream,
879 		struct dc_state *context)
880 {
881 	enum dc_status result = DC_ERROR_UNEXPECTED;
882 
883 	context->streams[0] = dc_stream;
884 	dc_stream_retain(context->streams[0]);
885 	context->stream_count++;
886 
887 	result = resource_map_pool_resources(dc, context, dc_stream);
888 
889 	if (result == DC_OK)
890 		result = resource_map_phy_clock_resources(dc, context, dc_stream);
891 
892 	if (result == DC_OK)
893 		result = build_mapped_resource(dc, context, dc_stream);
894 
895 	if (result == DC_OK) {
896 		validate_guaranteed_copy_streams(
897 				context, dc->caps.max_streams);
898 		result = resource_build_scaling_params_for_context(dc, context);
899 	}
900 	if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
901 		return DC_FAIL_BANDWIDTH_VALIDATE;
902 
903 	return result;
904 }
905 
906 static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
907 		struct dc_state *context,
908 		const struct resource_pool *pool,
909 		struct dc_stream_state *stream)
910 {
911 	struct resource_context *res_ctx = &context->res_ctx;
912 	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
913 	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
914 
	if (!head_pipe) {
		ASSERT(0);
		return NULL;
	}

	if (!idle_pipe)
		return NULL;
920 
921 	idle_pipe->stream = head_pipe->stream;
922 	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
923 	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
924 
925 	idle_pipe->plane_res.mi = pool->mis[idle_pipe->pipe_idx];
926 	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
927 	idle_pipe->plane_res.xfm = pool->transforms[idle_pipe->pipe_idx];
928 
929 	return idle_pipe;
930 }
931 
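/*
 * A rough reading of these dcc_control values, based on how they are
 * consumed in get_dcc_compression_cap() below: the first number is the
 * max uncompressed block size, the second the max compressed block size,
 * and dcc_control__256_64_64 additionally requires independent 64B blocks.
 */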
932 enum dcc_control {
933 	dcc_control__256_256_xxx,
934 	dcc_control__128_128_xxx,
935 	dcc_control__256_64_64,
936 };
937 
938 enum segment_order {
939 	segment_order__na,
940 	segment_order__contiguous,
941 	segment_order__non_contiguous,
942 };
943 
944 static bool dcc_support_pixel_format(
945 		enum surface_pixel_format format,
946 		unsigned int *bytes_per_element)
947 {
948 	/* DML: get_bytes_per_element */
949 	switch (format) {
950 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
951 	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
952 		*bytes_per_element = 2;
953 		return true;
954 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
955 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
956 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
957 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
958 		*bytes_per_element = 4;
959 		return true;
960 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
961 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
962 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
963 		*bytes_per_element = 8;
964 		return true;
965 	default:
966 		return false;
967 	}
968 }
969 
970 static bool dcc_support_swizzle(
971 		enum swizzle_mode_values swizzle,
972 		unsigned int bytes_per_element,
973 		enum segment_order *segment_order_horz,
974 		enum segment_order *segment_order_vert)
975 {
976 	bool standard_swizzle = false;
977 	bool display_swizzle = false;
978 
979 	switch (swizzle) {
980 	case DC_SW_4KB_S:
981 	case DC_SW_64KB_S:
982 	case DC_SW_VAR_S:
983 	case DC_SW_4KB_S_X:
984 	case DC_SW_64KB_S_X:
985 	case DC_SW_VAR_S_X:
986 		standard_swizzle = true;
987 		break;
988 	case DC_SW_4KB_D:
989 	case DC_SW_64KB_D:
990 	case DC_SW_VAR_D:
991 	case DC_SW_4KB_D_X:
992 	case DC_SW_64KB_D_X:
993 	case DC_SW_VAR_D_X:
994 		display_swizzle = true;
995 		break;
996 	default:
997 		break;
998 	}
999 
1000 	if (bytes_per_element == 1 && standard_swizzle) {
1001 		*segment_order_horz = segment_order__contiguous;
1002 		*segment_order_vert = segment_order__na;
1003 		return true;
1004 	}
1005 	if (bytes_per_element == 2 && standard_swizzle) {
1006 		*segment_order_horz = segment_order__non_contiguous;
1007 		*segment_order_vert = segment_order__contiguous;
1008 		return true;
1009 	}
1010 	if (bytes_per_element == 4 && standard_swizzle) {
1011 		*segment_order_horz = segment_order__non_contiguous;
1012 		*segment_order_vert = segment_order__contiguous;
1013 		return true;
1014 	}
1015 	if (bytes_per_element == 8 && standard_swizzle) {
1016 		*segment_order_horz = segment_order__na;
1017 		*segment_order_vert = segment_order__contiguous;
1018 		return true;
1019 	}
1020 	if (bytes_per_element == 8 && display_swizzle) {
1021 		*segment_order_horz = segment_order__contiguous;
1022 		*segment_order_vert = segment_order__non_contiguous;
1023 		return true;
1024 	}
1025 
1026 	return false;
1027 }
1028 
1029 static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
1030 		unsigned int bytes_per_element)
1031 {
	/* copied from DML; might want to refactor to leverage DML directly
	 * instead of duplicating this here
	 */
1033 	/* DML : get_blk256_size */
1034 	if (bytes_per_element == 1) {
1035 		*blk256_width = 16;
1036 		*blk256_height = 16;
1037 	} else if (bytes_per_element == 2) {
1038 		*blk256_width = 16;
1039 		*blk256_height = 8;
1040 	} else if (bytes_per_element == 4) {
1041 		*blk256_width = 8;
1042 		*blk256_height = 8;
1043 	} else if (bytes_per_element == 8) {
1044 		*blk256_width = 8;
1045 		*blk256_height = 4;
1046 	}
1047 }
1048 
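/*
 * A rough summary of the check in det_request_size() below: compute the
 * swath size in bytes for horizontal and vertical access and, if two
 * swaths no longer fit in the DCN1.0 detile buffer, fall back from full
 * 256B requests to half-size 128B requests in that direction.
 */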
1049 static void det_request_size(
1050 		unsigned int height,
1051 		unsigned int width,
1052 		unsigned int bpe,
1053 		bool *req128_horz_wc,
1054 		bool *req128_vert_wc)
1055 {
1056 	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */
1057 
1058 	unsigned int blk256_height = 0;
1059 	unsigned int blk256_width = 0;
1060 	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
1061 
1062 	get_blk256_size(&blk256_width, &blk256_height, bpe);
1063 
1064 	swath_bytes_horz_wc = height * blk256_height * bpe;
1065 	swath_bytes_vert_wc = width * blk256_width * bpe;
1066 
1067 	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
1068 			false : /* full 256B request */
			true; /* half 128B request */
1070 
1071 	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
1072 			false : /* full 256B request */
			true; /* half 128B request */
1074 }
1075 
1076 static bool get_dcc_compression_cap(const struct dc *dc,
1077 		const struct dc_dcc_surface_param *input,
1078 		struct dc_surface_dcc_cap *output)
1079 {
1080 	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
1081 	enum dcc_control dcc_control;
1082 	unsigned int bpe;
1083 	enum segment_order segment_order_horz, segment_order_vert;
1084 	bool req128_horz_wc, req128_vert_wc;
1085 
1086 	memset(output, 0, sizeof(*output));
1087 
1088 	if (dc->debug.disable_dcc)
1089 		return false;
1090 
1091 	if (!dcc_support_pixel_format(input->format,
1092 			&bpe))
1093 		return false;
1094 
1095 	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
1096 			&segment_order_horz, &segment_order_vert))
1097 		return false;
1098 
1099 	det_request_size(input->surface_size.height,  input->surface_size.width,
1100 			bpe, &req128_horz_wc, &req128_vert_wc);
1101 
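	/*
	 * Roughly: prefer 256B/256B DCC when full 256B requests are possible;
	 * otherwise pick 128B/128B or 256B/64B depending on whether the
	 * segments are contiguous in the (known or worst-case) scan direction.
	 */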
1102 	if (!req128_horz_wc && !req128_vert_wc) {
1103 		dcc_control = dcc_control__256_256_xxx;
1104 	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1105 		if (!req128_horz_wc)
1106 			dcc_control = dcc_control__256_256_xxx;
1107 		else if (segment_order_horz == segment_order__contiguous)
1108 			dcc_control = dcc_control__128_128_xxx;
1109 		else
1110 			dcc_control = dcc_control__256_64_64;
1111 	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1112 		if (!req128_vert_wc)
1113 			dcc_control = dcc_control__256_256_xxx;
1114 		else if (segment_order_vert == segment_order__contiguous)
1115 			dcc_control = dcc_control__128_128_xxx;
1116 		else
1117 			dcc_control = dcc_control__256_64_64;
1118 	} else {
1119 		if ((req128_horz_wc &&
1120 			segment_order_horz == segment_order__non_contiguous) ||
1121 			(req128_vert_wc &&
1122 			segment_order_vert == segment_order__non_contiguous))
1123 			/* access_dir not known, must use most constraining */
1124 			dcc_control = dcc_control__256_64_64;
1125 		else
			/* req128 is true for either horz or vert,
			 * but the corresponding segment_order is contiguous
			 */
1129 			dcc_control = dcc_control__128_128_xxx;
1130 	}
1131 
1132 	switch (dcc_control) {
1133 	case dcc_control__256_256_xxx:
1134 		output->grph.rgb.max_uncompressed_blk_size = 256;
1135 		output->grph.rgb.max_compressed_blk_size = 256;
1136 		output->grph.rgb.independent_64b_blks = false;
1137 		break;
1138 	case dcc_control__128_128_xxx:
1139 		output->grph.rgb.max_uncompressed_blk_size = 128;
1140 		output->grph.rgb.max_compressed_blk_size = 128;
1141 		output->grph.rgb.independent_64b_blks = false;
1142 		break;
1143 	case dcc_control__256_64_64:
1144 		output->grph.rgb.max_uncompressed_blk_size = 256;
1145 		output->grph.rgb.max_compressed_blk_size = 64;
1146 		output->grph.rgb.independent_64b_blks = true;
1147 		break;
1148 	}
1149 	output->capable = true;
1150 	output->const_color_support = false;
1151 
1152 	return true;
1153 }
1154 
1155 
1156 static void dcn10_destroy_resource_pool(struct resource_pool **pool)
1157 {
1158 	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
1159 
1160 	destruct(dcn10_pool);
1161 	kfree(dcn10_pool);
1162 	*pool = NULL;
1163 }
1164 
1165 
1166 static struct dc_cap_funcs cap_funcs = {
1167 	.get_dcc_compression_cap = get_dcc_compression_cap
1168 };
1169 
1170 static struct resource_funcs dcn10_res_pool_funcs = {
1171 	.destroy = dcn10_destroy_resource_pool,
1172 	.link_enc_create = dcn10_link_encoder_create,
1173 	.validate_guaranteed = dcn10_validate_guaranteed,
1174 	.validate_bandwidth = dcn_validate_bandwidth,
1175 	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
1176 	.add_stream_to_ctx = dcn10_add_stream_to_ctx
1177 };
1178 
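/*
 * Each bit set in CC_DC_PIPE_DIS marks a pipe instance as fused off (see
 * the pipe_fuses check in construct() below), so those register instances
 * are skipped when building the pool.
 */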
1179 static uint32_t read_pipe_fuses(struct dc_context *ctx)
1180 {
1181 	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
	/* RV1 supports a maximum of 4 pipes */
1183 	value = value & 0xf;
1184 	return value;
1185 }
1186 
1187 static bool construct(
1188 	uint8_t num_virtual_links,
1189 	struct dc *dc,
1190 	struct dcn10_resource_pool *pool)
1191 {
1192 	int i;
1193 	int j;
1194 	struct dc_context *ctx = dc->ctx;
1195 	uint32_t pipe_fuses = read_pipe_fuses(ctx);
1196 
1197 	ctx->dc_bios->regs = &bios_regs;
1198 
1199 	pool->base.res_cap = &res_cap;
1200 	pool->base.funcs = &dcn10_res_pool_funcs;
1201 
	/*
	 * TODO: fill in from the actual Raven resources once we create
	 * more than a virtual encoder
	 */
1206 
1207 	/*************************************************
	 *  Resource + ASIC cap hardcoding               *
1209 	 *************************************************/
1210 	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
1211 
	/* max pipe count for this ASIC before checking pipe fuses */
1213 	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
1214 
1215 	dc->caps.max_downscale_ratio = 200;
1216 	dc->caps.i2c_speed_in_khz = 100;
1217 	dc->caps.max_cursor_size = 256;
1218 
1219 	dc->caps.max_slave_planes = 1;
1220 
1221 	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
1222 		dc->debug = debug_defaults_drv;
1223 	else
1224 		dc->debug = debug_defaults_diags;
1225 
1226 	/*************************************************
1227 	 *  Create resources                             *
1228 	 *************************************************/
1229 
1230 	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
1231 			dcn10_clock_source_create(ctx, ctx->dc_bios,
1232 				CLOCK_SOURCE_COMBO_PHY_PLL0,
1233 				&clk_src_regs[0], false);
1234 	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
1235 			dcn10_clock_source_create(ctx, ctx->dc_bios,
1236 				CLOCK_SOURCE_COMBO_PHY_PLL1,
1237 				&clk_src_regs[1], false);
1238 	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
1239 			dcn10_clock_source_create(ctx, ctx->dc_bios,
1240 				CLOCK_SOURCE_COMBO_PHY_PLL2,
1241 				&clk_src_regs[2], false);
1242 	pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
1243 			dcn10_clock_source_create(ctx, ctx->dc_bios,
1244 				CLOCK_SOURCE_COMBO_PHY_PLL3,
1245 				&clk_src_regs[3], false);
1246 
1247 	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
1248 
1249 	pool->base.dp_clock_source =
1250 			dcn10_clock_source_create(ctx, ctx->dc_bios,
1251 				CLOCK_SOURCE_ID_DP_DTO,
				/* TODO: do not reuse the phy_pll registers */
1253 				&clk_src_regs[0], true);
1254 
1255 	for (i = 0; i < pool->base.clk_src_count; i++) {
1256 		if (pool->base.clock_sources[i] == NULL) {
1257 			dm_error("DC: failed to create clock sources!\n");
1258 			BREAK_TO_DEBUGGER();
1259 			goto clock_source_create_fail;
1260 		}
1261 	}
1262 
1263 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1264 		pool->base.display_clock = dce120_disp_clk_create(ctx);
1265 		if (pool->base.display_clock == NULL) {
1266 			dm_error("DC: failed to create display clock!\n");
1267 			BREAK_TO_DEBUGGER();
1268 			goto disp_clk_create_fail;
1269 		}
1270 	}
1271 
1272 	pool->base.dmcu = dcn10_dmcu_create(ctx,
1273 			&dmcu_regs,
1274 			&dmcu_shift,
1275 			&dmcu_mask);
1276 	if (pool->base.dmcu == NULL) {
1277 		dm_error("DC: failed to create dmcu!\n");
1278 		BREAK_TO_DEBUGGER();
1279 		goto res_create_fail;
1280 	}
1281 
1282 	pool->base.abm = dce_abm_create(ctx,
1283 			&abm_regs,
1284 			&abm_shift,
1285 			&abm_mask);
1286 	if (pool->base.abm == NULL) {
1287 		dm_error("DC: failed to create abm!\n");
1288 		BREAK_TO_DEBUGGER();
1289 		goto res_create_fail;
1290 	}
1291 
1292 	dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
1293 	memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
1294 	memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
1295 
1296 	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
1297 		dc->dcn_soc->urgent_latency = 3;
1298 		dc->debug.disable_dmcu = true;
1299 		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
1300 	}
1301 
1302 
1303 	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
1304 	ASSERT(dc->dcn_soc->number_of_channels < 3);
1305 	if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
1306 		dc->dcn_soc->number_of_channels = 2;
1307 
1308 	if (dc->dcn_soc->number_of_channels == 1) {
1309 		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
1310 		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
1311 		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
1312 		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
1313 		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
1314 			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
1315 		}
1316 	}
1317 
1318 	pool->base.pp_smu = dcn10_pp_smu_create(ctx);
1319 
1320 	if (!dc->debug.disable_pplib_clock_request)
1321 		dcn_bw_update_from_pplib(dc);
1322 	dcn_bw_sync_calcs_and_dml(dc);
1323 	if (!dc->debug.disable_pplib_wm_range) {
1324 		dc->res_pool = &pool->base;
1325 		dcn_bw_notify_pplib_of_wm_ranges(dc);
1326 	}
1327 
1328 	{
1329 	#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1330 		struct irq_service_init_data init_data;
1331 		init_data.ctx = dc->ctx;
1332 		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
1333 		if (!pool->base.irqs)
1334 			goto irqs_create_fail;
1335 	#endif
1336 	}
1337 
	/* index into the valid pipe resources */
1339 	j = 0;
1340 	/* mem input -> ipp -> dpp -> opp -> TG */
1341 	for (i = 0; i < pool->base.pipe_count; i++) {
		/* if the pipe is disabled (fused off), skip this HW pipe
		 * instance, i.e., skip the ASIC register instance
		 */
1345 		if ((pipe_fuses & (1 << i)) != 0)
1346 			continue;
1347 
1348 		pool->base.mis[j] = dcn10_mem_input_create(ctx, i);
1349 		if (pool->base.mis[j] == NULL) {
1350 			BREAK_TO_DEBUGGER();
1351 			dm_error(
1352 				"DC: failed to create memory input!\n");
1353 			goto mi_create_fail;
1354 		}
1355 
1356 		pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
1357 		if (pool->base.ipps[j] == NULL) {
1358 			BREAK_TO_DEBUGGER();
1359 			dm_error(
1360 				"DC: failed to create input pixel processor!\n");
1361 			goto ipp_create_fail;
1362 		}
1363 
1364 		pool->base.transforms[j] = dcn10_dpp_create(ctx, i);
1365 		if (pool->base.transforms[j] == NULL) {
1366 			BREAK_TO_DEBUGGER();
1367 			dm_error(
1368 				"DC: failed to create dpp!\n");
1369 			goto dpp_create_fail;
1370 		}
1371 
1372 		pool->base.opps[j] = dcn10_opp_create(ctx, i);
1373 		if (pool->base.opps[j] == NULL) {
1374 			BREAK_TO_DEBUGGER();
1375 			dm_error(
1376 				"DC: failed to create output pixel processor!\n");
1377 			goto opp_create_fail;
1378 		}
1379 
1380 		pool->base.timing_generators[j] = dcn10_timing_generator_create(
1381 				ctx, i);
1382 		if (pool->base.timing_generators[j] == NULL) {
1383 			BREAK_TO_DEBUGGER();
1384 			dm_error("DC: failed to create tg!\n");
1385 			goto otg_create_fail;
1386 		}
		/* advance to the next valid pipe resource index */
1388 		j++;
1389 	}
1390 
1391 	/* valid pipe num */
1392 	pool->base.pipe_count = j;
1393 
	/* within the DML lib this is hardcoded to 4; if ASIC pipes are
	 * fused, the value may change
	 */
1397 	dc->dml.ip.max_num_dpp = pool->base.pipe_count;
1398 	dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
1399 
1400 	pool->base.mpc = dcn10_mpc_create(ctx);
1401 	if (pool->base.mpc == NULL) {
1402 		BREAK_TO_DEBUGGER();
1403 		dm_error("DC: failed to create mpc!\n");
1404 		goto mpc_create_fail;
1405 	}
1406 
1407 	if (!resource_construct(num_virtual_links, dc, &pool->base,
1408 			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
1409 			&res_create_funcs : &res_create_maximus_funcs)))
1410 			goto res_create_fail;
1411 
1412 	dcn10_hw_sequencer_construct(dc);
1413 	dc->caps.max_planes =  pool->base.pipe_count;
1414 
1415 	dc->cap_funcs = cap_funcs;
1416 
1417 	return true;
1418 
1419 disp_clk_create_fail:
1420 mpc_create_fail:
1421 otg_create_fail:
1422 opp_create_fail:
1423 dpp_create_fail:
1424 ipp_create_fail:
1425 mi_create_fail:
1426 irqs_create_fail:
1427 res_create_fail:
1428 clock_source_create_fail:
1429 
1430 	destruct(pool);
1431 
1432 	return false;
1433 }
1434 
1435 struct resource_pool *dcn10_create_resource_pool(
1436 		uint8_t num_virtual_links,
1437 		struct dc *dc)
1438 {
1439 	struct dcn10_resource_pool *pool =
1440 		kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
1441 
1442 	if (!pool)
1443 		return NULL;
1444 
1445 	if (construct(num_virtual_links, dc, pool))
1446 		return &pool->base;
1447 
1448 	BREAK_TO_DEBUGGER();
1449 	return NULL;
1450 }
1451