/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"

#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10/dcn10_resource.h"

#include "dcn10/dcn10_ipp.h"
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10_optc.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "../virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"

#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"

#include "nbio/nbio_7_0_offset.h"

#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"

#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif


enum dcn10_clk_src_array_id {
	DCN10_CLK_SRC_PLL0,
	DCN10_CLK_SRC_PLL1,
	DCN10_CLK_SRC_PLL2,
	DCN10_CLK_SRC_PLL3,
	DCN10_CLK_SRC_TOTAL
};

/* begin *********************
 * macros to expand register list macros defined in HW object header files */

/* DCN */
#define BASE_INNER(seg) \
	DCE_BASE__INST0_SEG ## seg

#define BASE(seg) \
	BASE_INNER(seg)

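/*
 * The SR/SRI/SRII macros below build absolute register addresses by adding
 * the IP base segment (looked up via the *_BASE_IDX define) to the register
 * offset from the register headers.  For instance, SRI(OTG_CONTROL, OTG, 0)
 * would expand to roughly:
 *	.OTG_CONTROL = BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL
 * (register name used here purely to illustrate the expansion).
 */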
#define SR(reg_name)\
		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name


#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIF_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
		.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
		.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* macros to expand register list macros defined in HW object header files
 * end *********************/


static const struct dce_dmcu_registers dmcu_regs = {
		DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
		DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
		DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {
		ABM_DCN10_REG_LIST(0)
};

static const struct dce_abm_shift abm_shift = {
		ABM_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
		ABM_MASK_SH_LIST_DCN10(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN_REG_LIST(id),\
	.TMDS_CNTL = 0,\
	.AFMT_AVI_INFO0 = 0,\
	.AFMT_AVI_INFO1 = 0,\
	.AFMT_AVI_INFO2 = 0,\
	.AFMT_AVI_INFO3 = 0,\
}

static const struct dce110_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
};

static const struct dce_stream_encoder_shift se_shift = {
		SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_stream_encoder_mask se_mask = {
		SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
		.AFMT_GENERIC0_UPDATE = 0,
		.AFMT_GENERIC2_UPDATE = 0,
		.DP_DYN_RANGE = 0,
		.DP_YCBCR_RANGE = 0,
		.HDMI_AVI_INFO_SEND = 0,
		.HDMI_AVI_INFO_CONT = 0,
		.HDMI_AVI_INFO_LINE = 0,
		.DP_SEC_AVI_ENABLE = 0,
		.AFMT_AVI_INFO_VERSION = 0
};

#define audio_regs(id)\
[id] = {\
		AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
		SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
		AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
		DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
		DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define aux_regs(id)\
[id] = {\
	AUX_REG_LIST(id)\
}

static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
		aux_regs(0),
		aux_regs(1),
		aux_regs(2),
		aux_regs(3),
		aux_regs(4),
		aux_regs(5)
};

#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}

static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
		hpd_regs(0),
		hpd_regs(1),
		hpd_regs(2),
		hpd_regs(3),
		hpd_regs(4),
		hpd_regs(5)
};

#define link_regs(id)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dce110_link_enc_registers link_enc_regs[] = {
	link_regs(0),
	link_regs(1),
	link_regs(2),
	link_regs(3),
	link_regs(4),
	link_regs(5),
	link_regs(6),
};

#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {
	ipp_regs(0),
	ipp_regs(1),
	ipp_regs(2),
	ipp_regs(3),
};

static const struct dcn10_ipp_shift ipp_shift = {
		IPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
		IPP_MASK_SH_LIST_DCN10(_MASK),
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
};

static const struct dcn10_opp_shift opp_shift = {
		OPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_opp_mask opp_mask = {
		OPP_MASK_SH_LIST_DCN10(_MASK),
};

#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN10(id),\
}

static const struct dcn_dpp_registers tf_regs[] = {
	tf_regs(0),
	tf_regs(1),
	tf_regs(2),
	tf_regs(3),
};

static const struct dcn_dpp_shift tf_shift = {
	TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
};

static const struct dcn_dpp_mask tf_mask = {
	TF_REG_LIST_SH_MASK_DCN10(_MASK),
};

static const struct dcn_mpc_registers mpc_regs = {
		MPC_COMMON_REG_LIST_DCN1_0(0),
		MPC_COMMON_REG_LIST_DCN1_0(1),
		MPC_COMMON_REG_LIST_DCN1_0(2),
		MPC_COMMON_REG_LIST_DCN1_0(3),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
		MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
};

static const struct dcn_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
};

#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}

static const struct dcn_optc_registers tg_regs[] = {
	tg_regs(0),
	tg_regs(1),
	tg_regs(2),
	tg_regs(3),
};

static const struct dcn_optc_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_optc_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct bios_registers bios_regs = {
	NBIO_SR(BIOS_SCRATCH_6)
};

#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN10(id)\
}


static const struct dcn_mi_registers hubp_regs[] = {
	hubp_regs(0),
	hubp_regs(1),
	hubp_regs(2),
	hubp_regs(3),
};

static const struct dcn_mi_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_mi_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN10(_MASK)
};


static const struct dcn_hubbub_registers hubbub_reg = {
	HUBBUB_REG_LIST_DCN10(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};

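/*
 * One clock source per combo PHY PLL; the array index matches the
 * DCN10_CLK_SRC_PLL* ids used when the sources are created in construct().
 */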
#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D)
};

static const struct dce110_clk_src_shift cs_shift = {
		CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
		CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct resource_caps res_cap = {
		.num_timing_generator = 4,
		.num_video_plane = 4,
		.num_audio = 4,
		.num_stream_encoder = 4,
		.num_pll = 4,
};

static const struct dc_debug debug_defaults_drv = {
		.sanity_checks = true,
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = false,
		.clock_trace = true,

		.min_disp_clk_khz = 300000,

		.disable_pplib_clock_request = true,
		.disable_pplib_wm_range = false,
		.pplib_wm_report_mode = WM_REPORT_DEFAULT,
		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
		.force_single_disp_pipe_split = true,
		.disable_dcc = DCC_ENABLE,
		.voltage_align_fclk = true,
		.disable_stereo_support = true,
		.vsr_support = true,
		.performance_trace = false,
};

static const struct dc_debug debug_defaults_diags = {
		.disable_dmcu = true,
		.force_abm_enable = false,
		.timing_trace = true,
		.clock_trace = true,
		.disable_stutter = true,
		.disable_pplib_clock_request = true,
		.disable_pplib_wm_range = true
};

static void dcn10_dpp_destroy(struct dpp **dpp)
{
	kfree(TO_DCN10_DPP(*dpp));
	*dpp = NULL;
}

static struct dpp *dcn10_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_dpp *dpp =
		kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	dpp1_construct(dpp, ctx, inst,
		       &tf_regs[inst], &tf_shift, &tf_mask);
	return &dpp->base;
}

static struct input_pixel_processor *dcn10_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp =
		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);

	if (!ipp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_ipp_construct(ipp, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}


static struct output_pixel_processor *dcn10_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_opp *opp =
		kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
	struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
					  GFP_KERNEL);

	if (!mpc10)
		return NULL;

	dcn10_mpc_construct(mpc10, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			4);

	return &mpc10->base;
}

static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
{
	struct hubbub *hubbub = kzalloc(sizeof(struct hubbub),
					GFP_KERNEL);

	if (!hubbub)
		return NULL;

	hubbub1_construct(hubbub, ctx,
			&hubbub_reg,
			&hubbub_shift,
			&hubbub_mask);

	return hubbub;
}

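/*
 * DCN1.0 timing generators are the OPTC instances; the per-instance
 * register/shift/mask tables are wired in here and
 * dcn10_timing_generator_init() installs the OPTC function table.
 */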
static struct timing_generator *dcn10_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &tg_regs[instance];
	tgn10->tg_shift = &tg_shift;
	tgn10->tg_mask = &tg_mask;

	dcn10_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.ycbcr420_supported = true,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true,
		.flags.bits.IS_YCBCR_CAPABLE = true
};

struct link_encoder *dcn10_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dce110_link_encoder *enc110 =
		kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_link_encoder_construct(enc110,
				      enc_init_data,
				      &link_enc_feature,
				      &link_enc_regs[enc_init_data->transmitter],
				      &link_enc_aux_regs[enc_init_data->channel - 1],
				      &link_enc_hpd_regs[enc_init_data->hpd_source]);

	return &enc110->base;
}

struct clock_source *dcn10_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dce110_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	BREAK_TO_DEBUGGER();
	return NULL;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}

static struct audio *create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

static struct stream_encoder *dcn10_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dce110_stream_encoder *enc110 =
		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);
	return &enc110->base;
}

static const struct dce_hwseq_registers hwseq_reg = {
		HWSEQ_DCN1_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
		HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
		HWSEQ_DCN1_MASK_SH_LIST(_MASK)
};

static struct dce_hwseq *dcn10_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
		hws->wa.DEGVIDCN10_253 = true;
		hws->wa.false_optc_underflow = true;
	}
	return hws;
}

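/*
 * Two creator tables: the normal driver path creates audio, stream encoders
 * and the hw sequencer, while the Maximus (FPGA emulation) path only needs
 * the hw sequencer; resource_construct() picks one based on the DCE
 * environment (see the IS_FPGA_MAXIMUS_DC check in construct()).
 */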
static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = create_audio,
	.create_stream_encoder = dcn10_stream_encoder_create,
	.create_hwseq = dcn10_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn10_hwseq_create,
};

void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
	kfree(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}

static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
{
	struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);

	if (!pp_smu)
		return pp_smu;

	dm_pp_get_funcs_rv(ctx, pp_smu);
	return pp_smu;
}

static void destruct(struct dcn10_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			/* TODO: free dcn version of stream encoder once implemented
			 * rather than using virtual stream encoder
			 */
			kfree(pool->base.stream_enc[i]);
			pool->base.stream_enc[i] = NULL;
		}
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN10_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}

	if (pool->base.hubbub != NULL) {
		kfree(pool->base.hubbub);
		pool->base.hubbub = NULL;
	}

	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);

		if (pool->base.dpps[i] != NULL)
			dcn10_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(&pool->base.irqs);
		}

		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.stream_enc_count; i++)
		kfree(pool->base.stream_enc[i]);

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	if (pool->base.abm != NULL)
		dce_abm_destroy(&pool->base.abm);

	if (pool->base.dmcu != NULL)
		dce_dmcu_destroy(&pool->base.dmcu);

	if (pool->base.display_clock != NULL)
		dce_disp_clk_destroy(&pool->base.display_clock);

	kfree(pool->base.pp_smu);
}

static struct hubp *dcn10_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_hubp *hubp1 =
		kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);

	if (!hubp1)
		return NULL;

	dcn10_hubp_construct(hubp1, ctx, inst,
			     &hubp_regs[inst], &hubp_shift, &hubp_mask);
	return &hubp1->base;
}

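/*
 * Derive pixel clock parameters from the stream timing: for YCbCr 4:2:2 the
 * color depth used in the clock calculation is forced to 8bpc, and YCbCr
 * 4:2:0 halves the requested pixel clock.
 */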
static void get_pixel_clock_parameters(
	const struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;

	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
						LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		pixel_clk_params->requested_pix_clk /= 2;
}

static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{

	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);

	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
		pipe_ctx->clock_source,
		&pipe_ctx->stream_res.pix_clk_params,
		&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
					&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);
}

static enum dc_status build_mapped_resource(
		const struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream)
{
	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);

	/* TODO: seems unneeded now */
	/* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
		if (stream != NULL && old_context->streams[i] != NULL) {
			todo: shouldn't have to copy missing parameter here
			resource_build_bit_depth_reduction_params(stream,
					&stream->bit_depth_params);
			stream->clamping.pixel_encoding =
					stream->timing.pixel_encoding;

			resource_build_bit_depth_reduction_params(stream,
					&stream->bit_depth_params);
			build_clamping_params(stream);

			continue;
		}
	}
	*/

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;

	build_pipe_hw_param(pipe_ctx);
	return DC_OK;
}

enum dc_status dcn10_add_stream_to_ctx(
		struct dc *dc,
		struct dc_state *new_ctx,
		struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	result = resource_map_pool_resources(dc, new_ctx, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);


	if (result == DC_OK)
		result = build_mapped_resource(dc, new_ctx, dc_stream);

	return result;
}

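/*
 * Validate that a single stream can be driven in isolation: the stream is
 * placed in an otherwise empty context, pool and PHY clock resources are
 * mapped, scaling parameters are built and bandwidth validation is run.
 */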
enum dc_status dcn10_validate_guaranteed(
		struct dc *dc,
		struct dc_stream_state *dc_stream,
		struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	context->streams[0] = dc_stream;
	dc_stream_retain(context->streams[0]);
	context->stream_count++;

	result = resource_map_pool_resources(dc, context, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, context, dc_stream);

	if (result == DC_OK)
		result = build_mapped_resource(dc, context, dc_stream);

	if (result == DC_OK) {
		validate_guaranteed_copy_streams(
				context, dc->caps.max_streams);
		result = resource_build_scaling_params_for_context(dc, context);
	}
	if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return result;
}

static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
		struct dc_state *context,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);

	if (!head_pipe) {
		ASSERT(0);
		return NULL;
	}

	if (!idle_pipe)
		return NULL;

	idle_pipe->stream = head_pipe->stream;
	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;

	return idle_pipe;
}

enum dcc_control {
	dcc_control__256_256_xxx,
	dcc_control__128_128_xxx,
	dcc_control__256_64_64,
};

enum segment_order {
	segment_order__na,
	segment_order__contiguous,
	segment_order__non_contiguous,
};

static bool dcc_support_pixel_format(
	enum surface_pixel_format format,
	unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

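/*
 * Map swizzle mode and bytes-per-element to the horizontal/vertical request
 * segment ordering; only the standard (S) and display (D) swizzle families
 * are accepted here, and the ordering feeds the dcc_control decision below.
 */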
static bool dcc_support_swizzle(
	enum swizzle_mode_values swizzle,
	unsigned int bytes_per_element,
	enum segment_order *segment_order_horz,
	enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}

	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor so this can be shared with DML */
	/* DML: get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

static void det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	get_blk256_size(&blk256_width, &blk256_height, bpe);

	/* a horizontal swath spans the surface width and one 256B-block row;
	 * a vertical swath spans the surface height and one 256B-block column
	 */
	swath_bytes_horz_wc = width * blk256_height * bpe;
	swath_bytes_vert_wc = height * blk256_width * bpe;

	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true;   /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true;   /* half 128B request */
}

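/*
 * Overall DCC capability decision: pixel format gives bytes-per-element,
 * swizzle mode gives request segment ordering, and the surface size decides
 * whether half-size (128B) requests are required; the combination selects
 * one of the dcc_control settings reported back in *output.
 */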
static bool get_dcc_compression_cap(const struct dc *dc,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but the corresponding segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}


static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);

	destruct(dcn10_pool);
	kfree(dcn10_pool);
	*pool = NULL;
}

static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
			&& caps->max_video_width != 0
			&& plane_state->src_rect.width > caps->max_video_width)
		return DC_FAIL_SURFACE_VALIDATE;

	return DC_OK;
}

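/*
 * Capability and resource function tables; these are hooked up to the dc and
 * the resource pool at the end of construct().
 */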
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = get_dcc_compression_cap
};

static struct resource_funcs dcn10_res_pool_funcs = {
	.destroy = dcn10_destroy_resource_pool,
	.link_enc_create = dcn10_link_encoder_create,
	.validate_guaranteed = dcn10_validate_guaranteed,
	.validate_bandwidth = dcn_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
	.validate_plane = dcn10_validate_plane,
	.add_stream_to_ctx = dcn10_add_stream_to_ctx
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
	/* RV1 supports max 4 pipes */
	value = value & 0xf;
	return value;
}

static bool construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn10_resource_pool *pool)
{
	int i;
	int j;
	struct dc_context *ctx = dc->ctx;
	uint32_t pipe_fuses = read_pipe_fuses(ctx);

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap;
	pool->base.funcs = &dcn10_res_pool_funcs;

	/*
	 * TODO: fill in from actual raven resource when we create
	 * more than the virtual encoder
	 */

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	/* max pipe num for ASIC before checking pipe fuses */
	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;

	dc->caps.max_video_width = 3840;
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.max_slave_planes = 1;
	dc->caps.is_apu = true;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;
	else
		dc->debug = debug_defaults_diags;

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);

	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;

	pool->base.dp_clock_source =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				/* TODO: do not reuse phy_pll registers */
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	pool->base.display_clock = dce120_disp_clk_create(ctx);
	if (pool->base.display_clock == NULL) {
		dm_error("DC: failed to create display clock!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

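	/* DMCU (display microcontroller) firmware block and ABM (adaptive
	 * backlight management) block
	 */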
	pool->base.dmcu = dcn10_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
	memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
	memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));

	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
		dc->dcn_soc->urgent_latency = 3;
		dc->debug.disable_dmcu = true;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
	}


	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
	ASSERT(dc->dcn_soc->number_of_channels < 3);
	if (dc->dcn_soc->number_of_channels == 0) /* old SBIOS bug */
		dc->dcn_soc->number_of_channels = 2;

	if (dc->dcn_soc->number_of_channels == 1) {
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
		}
	}

	pool->base.pp_smu = dcn10_pp_smu_create(ctx);

	if (!dc->debug.disable_pplib_clock_request)
		dcn_bw_update_from_pplib(dc);
	dcn_bw_sync_calcs_and_dml(dc);
	if (!dc->debug.disable_pplib_wm_range) {
		dc->res_pool = &pool->base;
		dcn_bw_notify_pplib_of_wm_ranges(dc);
	}

	{
		struct irq_service_init_data init_data;
		init_data.ctx = dc->ctx;
		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
		if (!pool->base.irqs)
			goto fail;
	}

	/* index to valid pipe resource */
	j = 0;
	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		/* if pipe is disabled, skip instance of HW pipe,
		 * i.e., skip ASIC register instance
		 */
		if ((pipe_fuses & (1 << i)) != 0)
			continue;

		pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
		if (pool->base.hubps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto fail;
		}

		pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
		if (pool->base.ipps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto fail;
		}

		pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
		if (pool->base.dpps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpp!\n");
			goto fail;
		}

		pool->base.opps[j] = dcn10_opp_create(ctx, i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto fail;
		}

		pool->base.timing_generators[j] = dcn10_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto fail;
		}

		/* advance to the next valid pipe slot */
		j++;
	}

	/* valid pipe num */
	pool->base.pipe_count = j;

	/* Within the DML lib this is hard-coded to 4; if ASIC pipes are
	 * fused off, the value may change
	 */
	dc->dml.ip.max_num_dpp = pool->base.pipe_count;
	dc->dcn_ip->max_num_dpp = pool->base.pipe_count;

	pool->base.mpc = dcn10_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto fail;
	}

	pool->base.hubbub = dcn10_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto fail;

	dcn10_hw_sequencer_construct(dc);
	dc->caps.max_planes = pool->base.pipe_count;

	dc->cap_funcs = cap_funcs;

	return true;

fail:

	destruct(pool);

	return false;
}

struct resource_pool *dcn10_create_resource_pool(
		uint8_t num_virtual_links,
		struct dc *dc)
{
	struct dcn10_resource_pool *pool =
		kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (construct(num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	return NULL;
}