/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"

#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn10/dcn10_resource.h"

#include "dcn10/dcn10_ipp.h"
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10_optc.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "../virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"

#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"

#include "nbio/nbio_7_0_offset.h"

#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"

#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
	#define mmDP0_DP_DPHY_INTERNAL_CTRL		0x210f
	#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP1_DP_DPHY_INTERNAL_CTRL		0x220f
	#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP2_DP_DPHY_INTERNAL_CTRL		0x230f
	#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP3_DP_DPHY_INTERNAL_CTRL		0x240f
	#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP4_DP_DPHY_INTERNAL_CTRL		0x250f
	#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP5_DP_DPHY_INTERNAL_CTRL		0x260f
	#define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
	#define mmDP6_DP_DPHY_INTERNAL_CTRL		0x270f
	#define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX	2
#endif


enum dcn10_clk_src_array_id {
	DCN10_CLK_SRC_PLL0,
	DCN10_CLK_SRC_PLL1,
	DCN10_CLK_SRC_PLL2,
	DCN10_CLK_SRC_PLL3,
	DCN10_CLK_SRC_TOTAL
};

/* begin *********************
 * macros to expand register list macro defined in HW object header file */
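
/*
 * Illustrative expansion (using names that appear in this file):
 * SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) resolves to
 *   .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX) +
 *                            mmDP0_DP_DPHY_INTERNAL_CTRL
 * i.e. the per-block register offset added to the base address of the
 * segment that holds the register.
 */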

/* DCN */
#define BASE_INNER(seg) \
	DCE_BASE__INST0_SEG ## seg

#define BASE(seg) \
	BASE_INNER(seg)

#define SR(reg_name)\
	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
			mm ## block ## id ## _ ## reg_name


#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
			mm ## block ## id ## _ ## reg_name

/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIF_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
	.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
	.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
			mm ## reg_name

/* macros to expand register list macro defined in HW object header file
 * end *********************/


static const struct dce_dmcu_registers dmcu_regs = {
	DMCU_DCN10_REG_LIST()
};

static const struct dce_dmcu_shift dmcu_shift = {
	DMCU_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_dmcu_mask dmcu_mask = {
	DMCU_MASK_SH_LIST_DCN10(_MASK)
};

static const struct dce_abm_registers abm_regs = {
	ABM_DCN10_REG_LIST(0)
};

static const struct dce_abm_shift abm_shift = {
	ABM_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
	ABM_MASK_SH_LIST_DCN10(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN_REG_LIST(id),\
	.TMDS_CNTL = 0,\
	.AFMT_AVI_INFO0 = 0,\
	.AFMT_AVI_INFO1 = 0,\
	.AFMT_AVI_INFO2 = 0,\
	.AFMT_AVI_INFO3 = 0,\
}

static const struct dce110_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
};

static const struct dce_stream_encoder_shift se_shift = {
	SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dce_stream_encoder_mask se_mask = {
	SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
	.AFMT_GENERIC0_UPDATE = 0,
	.AFMT_GENERIC2_UPDATE = 0,
	.DP_DYN_RANGE = 0,
	.DP_YCBCR_RANGE = 0,
	.HDMI_AVI_INFO_SEND = 0,
	.HDMI_AVI_INFO_CONT = 0,
	.HDMI_AVI_INFO_LINE = 0,
	.DP_SEC_AVI_ENABLE = 0,
	.AFMT_AVI_INFO_VERSION = 0
};

#define audio_regs(id)\
[id] = {\
	AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
	AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
	DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_aduio_mask audio_mask = {
	DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define aux_regs(id)\
[id] = {\
	AUX_REG_LIST(id)\
}

static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
	aux_regs(0),
	aux_regs(1),
	aux_regs(2),
	aux_regs(3),
	aux_regs(4),
	aux_regs(5)
};

#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}
static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
	hpd_regs(0),
	hpd_regs(1),
	hpd_regs(2),
	hpd_regs(3),
	hpd_regs(4),
	hpd_regs(5)
};

#define link_regs(id)\
[id] = {\
	LE_DCN10_REG_LIST(id), \
	SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
}

static const struct dce110_link_enc_registers link_enc_regs[] = {
	link_regs(0),
	link_regs(1),
	link_regs(2),
	link_regs(3),
	link_regs(4),
	link_regs(5),
	link_regs(6),
};

#define ipp_regs(id)\
[id] = {\
	IPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_ipp_registers ipp_regs[] = {
	ipp_regs(0),
	ipp_regs(1),
	ipp_regs(2),
	ipp_regs(3),
};

static const struct dcn10_ipp_shift ipp_shift = {
	IPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_ipp_mask ipp_mask = {
	IPP_MASK_SH_LIST_DCN10(_MASK),
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN10(id),\
}

static const struct dcn10_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
};

static const struct dcn10_opp_shift opp_shift = {
	OPP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_opp_mask opp_mask = {
	OPP_MASK_SH_LIST_DCN10(_MASK),
};

#define tf_regs(id)\
[id] = {\
	TF_REG_LIST_DCN10(id),\
}

static const struct dcn_dpp_registers tf_regs[] = {
	tf_regs(0),
	tf_regs(1),
	tf_regs(2),
	tf_regs(3),
};

static const struct dcn_dpp_shift tf_shift = {
	TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
};

static const struct dcn_dpp_mask tf_mask = {
	TF_REG_LIST_SH_MASK_DCN10(_MASK),
};

static const struct dcn_mpc_registers mpc_regs = {
	MPC_COMMON_REG_LIST_DCN1_0(0),
	MPC_COMMON_REG_LIST_DCN1_0(1),
	MPC_COMMON_REG_LIST_DCN1_0(2),
	MPC_COMMON_REG_LIST_DCN1_0(3),
	MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
	MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
	MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
	MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
};

static const struct dcn_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
};

#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}

static const struct dcn_optc_registers tg_regs[] = {
	tg_regs(0),
	tg_regs(1),
	tg_regs(2),
	tg_regs(3),
};

static const struct dcn_optc_shift tg_shift = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dcn_optc_mask tg_mask = {
	TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct bios_registers bios_regs = {
	NBIO_SR(BIOS_SCRATCH_3),
	NBIO_SR(BIOS_SCRATCH_6)
};

#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN10(id)\
}


static const struct dcn_mi_registers hubp_regs[] = {
	hubp_regs(0),
	hubp_regs(1),
	hubp_regs(2),
	hubp_regs(3),
};

static const struct dcn_mi_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_mi_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN10(_MASK)
};
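
/*
 * Unlike the per-pipe HUBP/DPP/OPP/TG register arrays above, HUBBUB is a
 * single shared instance, so only one register set (instance 0) is defined.
 */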

static const struct dcn_hubbub_registers hubbub_reg = {
	HUBBUB_REG_LIST_DCN10(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN10(_MASK)
};

#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D)
};

static const struct dce110_clk_src_shift cs_shift = {
	CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
	CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};


static const struct resource_caps res_cap = {
	.num_timing_generator = 4,
	.num_video_plane = 4,
	.num_audio = 4,
	.num_stream_encoder = 4,
	.num_pll = 4,
};

static const struct dc_debug debug_defaults_drv = {
	.sanity_checks = true,
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = false,
	.clock_trace = true,

	/* raven smu doesn't allow 0 disp clk,
	 * smu min disp clk limit is 50 MHz;
	 * keep min disp clk at 100 MHz to avoid smu hang
	 */
	.min_disp_clk_khz = 100000,

	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = false,
	.pplib_wm_report_mode = WM_REPORT_DEFAULT,
	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
	.force_single_disp_pipe_split = true,
	.disable_dcc = DCC_ENABLE,
	.voltage_align_fclk = true,
	.disable_stereo_support = true,
	.vsr_support = true,
	.performance_trace = false,
	.az_endpoint_mute_only = true,
};

static const struct dc_debug debug_defaults_diags = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = true,
	.clock_trace = true,
	.disable_stutter = true,
	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = true
};

static void dcn10_dpp_destroy(struct dpp **dpp)
{
	kfree(TO_DCN10_DPP(*dpp));
	*dpp = NULL;
}

static struct dpp *dcn10_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_dpp *dpp =
		kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	dpp1_construct(dpp, ctx, inst,
		       &tf_regs[inst], &tf_shift, &tf_mask);
	return &dpp->base;
}

static struct input_pixel_processor *dcn10_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp =
		kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);

	if (!ipp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_ipp_construct(ipp, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);
	return &ipp->base;
}


static struct output_pixel_processor *dcn10_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_opp *opp =
		kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn10_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
	struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
					  GFP_KERNEL);

	if (!mpc10)
		return NULL;

	dcn10_mpc_construct(mpc10, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			4);

	return &mpc10->base;
}

static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx)
{
	struct hubbub *hubbub = kzalloc(sizeof(struct hubbub),
					GFP_KERNEL);

	if (!hubbub)
		return NULL;

	hubbub1_construct(hubbub, ctx,
			&hubbub_reg,
			&hubbub_shift,
			&hubbub_mask);

	return hubbub;
}

static struct timing_generator *dcn10_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &tg_regs[instance];
	tgn10->tg_shift = &tg_shift;
	tgn10->tg_mask = &tg_mask;

	dcn10_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
	.max_hdmi_deep_color = COLOR_DEPTH_121212,
	.max_hdmi_pixel_clock = 600000,
	.ycbcr420_supported = true,
	.flags.bits.IS_HBR2_CAPABLE = true,
	.flags.bits.IS_HBR3_CAPABLE = true,
	.flags.bits.IS_TPS3_CAPABLE = true,
	.flags.bits.IS_TPS4_CAPABLE = true,
	.flags.bits.IS_YCBCR_CAPABLE = true
};

struct link_encoder *dcn10_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dce110_link_encoder *enc110 =
		kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_link_encoder_construct(enc110,
				enc_init_data,
				&link_enc_feature,
				&link_enc_regs[enc_init_data->transmitter],
				&link_enc_aux_regs[enc_init_data->channel - 1],
				&link_enc_hpd_regs[enc_init_data->hpd_source]);

	return &enc110->base;
}

struct clock_source *dcn10_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dce110_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	BREAK_TO_DEBUGGER();
	return NULL;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}

static struct audio *create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

static struct stream_encoder *dcn10_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dce110_stream_encoder *enc110 =
		kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);

	if (!enc110)
		return NULL;

	dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);
	return &enc110->base;
}

static const struct dce_hwseq_registers hwseq_reg = {
	HWSEQ_DCN1_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
	HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
	HWSEQ_DCN1_MASK_SH_LIST(_MASK)
};

static struct dce_hwseq *dcn10_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
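		/*
		 * DCN1.0 hardware workaround flags consumed by the hw
		 * sequencer; both are enabled by default on this ASIC.
		 */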
		hws->wa.DEGVIDCN10_253 = true;
		hws->wa.false_optc_underflow = true;
	}
	return hws;
}

static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = create_audio,
	.create_stream_encoder = dcn10_stream_encoder_create,
	.create_hwseq = dcn10_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn10_hwseq_create,
};

void dcn10_clock_source_destroy(struct clock_source **clk_src)
{
	kfree(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}

static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
{
	struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);

	if (!pp_smu)
		return pp_smu;

	dm_pp_get_funcs_rv(ctx, pp_smu);
	return pp_smu;
}

static void destruct(struct dcn10_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			/* TODO: free dcn version of stream encoder once implemented
			 * rather than using virtual stream encoder
			 */
			kfree(pool->base.stream_enc[i]);
			pool->base.stream_enc[i] = NULL;
		}
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN10_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}

	if (pool->base.hubbub != NULL) {
		kfree(pool->base.hubbub);
		pool->base.hubbub = NULL;
	}

	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);

		if (pool->base.dpps[i] != NULL)
			dcn10_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(&pool->base.irqs);
		}

		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.stream_enc_count; i++)
		kfree(pool->base.stream_enc[i]);

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn10_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	if (pool->base.abm != NULL)
		dce_abm_destroy(&pool->base.abm);

	if (pool->base.dmcu != NULL)
		dce_dmcu_destroy(&pool->base.dmcu);

	if (pool->base.display_clock != NULL)
		dce_disp_clk_destroy(&pool->base.display_clock);

	kfree(pool->base.pp_smu);
}

static struct hubp *dcn10_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn10_hubp *hubp1 =
		kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);

	if (!hubp1)
		return NULL;

	dcn10_hubp_construct(hubp1, ctx, inst,
			     &hubp_regs[inst], &hubp_shift, &hubp_mask);
	return &hubp1->base;
}

static void get_pixel_clock_parameters(
	const struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;

	pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
	pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
			LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		pixel_clk_params->requested_pix_clk /= 2;

}

static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);

	pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
		pipe_ctx->clock_source,
		&pipe_ctx->stream_res.pix_clk_params,
		&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
			&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);
}

static enum dc_status build_mapped_resource(
		const struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *stream)
{
	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);

	/* TODO: seems unneeded anymore */
	/* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
			if (stream != NULL && old_context->streams[i] != NULL) {
				todo: shouldn't have to copy missing parameter here
				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				stream->clamping.pixel_encoding =
						stream->timing.pixel_encoding;

				resource_build_bit_depth_reduction_params(stream,
						&stream->bit_depth_params);
				build_clamping_params(stream);

				continue;
			}
	}
	*/

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;

	build_pipe_hw_param(pipe_ctx);
	return DC_OK;
}

enum dc_status dcn10_add_stream_to_ctx(
		struct dc *dc,
		struct dc_state *new_ctx,
		struct dc_stream_state *dc_stream)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;

	result = resource_map_pool_resources(dc, new_ctx, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);


	if (result == DC_OK)
		result = build_mapped_resource(dc, new_ctx, dc_stream);

	return result;
}

enum dc_status dcn10_validate_guaranteed(
		struct dc *dc,
		struct dc_stream_state *dc_stream,
		struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
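
	/*
	 * Guaranteed-mode validation: place the single stream in the context,
	 * map pool and PHY clock resources, build HW parameters, then run
	 * DCN bandwidth validation on the result.
	 */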
	context->streams[0] = dc_stream;
	dc_stream_retain(context->streams[0]);
	context->stream_count++;

	result = resource_map_pool_resources(dc, context, dc_stream);

	if (result == DC_OK)
		result = resource_map_phy_clock_resources(dc, context, dc_stream);

	if (result == DC_OK)
		result = build_mapped_resource(dc, context, dc_stream);

	if (result == DC_OK) {
		validate_guaranteed_copy_streams(
				context, dc->caps.max_streams);
		result = resource_build_scaling_params_for_context(dc, context);
	}
	if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return result;
}

static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
		struct dc_state *context,
		const struct resource_pool *pool,
		struct dc_stream_state *stream)
{
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
	struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);

	if (!head_pipe) {
		ASSERT(0);
		return NULL;
	}

	if (!idle_pipe)
		return NULL;

	idle_pipe->stream = head_pipe->stream;
	idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
	idle_pipe->stream_res.abm = head_pipe->stream_res.abm;
	idle_pipe->stream_res.opp = head_pipe->stream_res.opp;

	idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
	idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;

	return idle_pipe;
}

enum dcc_control {
	dcc_control__256_256_xxx,
	dcc_control__128_128_xxx,
	dcc_control__256_64_64,
};

enum segment_order {
	segment_order__na,
	segment_order__contiguous,
	segment_order__non_contiguous,
};

static bool dcc_support_pixel_format(
		enum surface_pixel_format format,
		unsigned int *bytes_per_element)
{
	/* DML: get_bytes_per_element */
	switch (format) {
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
		*bytes_per_element = 2;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		*bytes_per_element = 4;
		return true;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		*bytes_per_element = 8;
		return true;
	default:
		return false;
	}
}

static bool dcc_support_swizzle(
		enum swizzle_mode_values swizzle,
		unsigned int bytes_per_element,
		enum segment_order *segment_order_horz,
		enum segment_order *segment_order_vert)
{
	bool standard_swizzle = false;
	bool display_swizzle = false;

	switch (swizzle) {
	case DC_SW_4KB_S:
	case DC_SW_64KB_S:
	case DC_SW_VAR_S:
	case DC_SW_4KB_S_X:
	case DC_SW_64KB_S_X:
	case DC_SW_VAR_S_X:
		standard_swizzle = true;
		break;
	case DC_SW_4KB_D:
	case DC_SW_64KB_D:
	case DC_SW_VAR_D:
	case DC_SW_4KB_D_X:
	case DC_SW_64KB_D_X:
	case DC_SW_VAR_D_X:
		display_swizzle = true;
		break;
	default:
		break;
	}
	if (bytes_per_element == 1 && standard_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__na;
		return true;
	}
	if (bytes_per_element == 2 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 4 && standard_swizzle) {
		*segment_order_horz = segment_order__non_contiguous;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && standard_swizzle) {
		*segment_order_horz = segment_order__na;
		*segment_order_vert = segment_order__contiguous;
		return true;
	}
	if (bytes_per_element == 8 && display_swizzle) {
		*segment_order_horz = segment_order__contiguous;
		*segment_order_vert = segment_order__non_contiguous;
		return true;
	}

	return false;
}

static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	/* copied from DML; might want to refactor so this is shared with DML */
	/* DML : get_blk256_size */
	if (bytes_per_element == 1) {
		*blk256_width = 16;
		*blk256_height = 16;
	} else if (bytes_per_element == 2) {
		*blk256_width = 16;
		*blk256_height = 8;
	} else if (bytes_per_element == 4) {
		*blk256_width = 8;
		*blk256_height = 8;
	} else if (bytes_per_element == 8) {
		*blk256_width = 8;
		*blk256_height = 4;
	}
}

static void det_request_size(
		unsigned int height,
		unsigned int width,
		unsigned int bpe,
		bool *req128_horz_wc,
		bool *req128_vert_wc)
{
	unsigned int detile_buf_size = 164 * 1024;  /* 164KB for DCN1.0 */

	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;

	get_blk256_size(&blk256_width, &blk256_height, bpe);

	swath_bytes_horz_wc = height * blk256_height * bpe;
	swath_bytes_vert_wc = width * blk256_width * bpe;
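
	/*
	 * If two swaths of a given scan direction fit in the detile buffer,
	 * full 256-byte requests can be used for that direction; otherwise
	 * fall back to half-size 128-byte requests.
	 */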
	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

	*req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */
}

static bool get_dcc_compression_cap(const struct dc *dc,
		const struct dc_dcc_surface_param *input,
		struct dc_surface_dcc_cap *output)
{
	/* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
	enum dcc_control dcc_control;
	unsigned int bpe;
	enum segment_order segment_order_horz, segment_order_vert;
	bool req128_horz_wc, req128_vert_wc;

	memset(output, 0, sizeof(*output));

	if (dc->debug.disable_dcc == DCC_DISABLE)
		return false;

	if (!dcc_support_pixel_format(input->format,
			&bpe))
		return false;

	if (!dcc_support_swizzle(input->swizzle_mode, bpe,
			&segment_order_horz, &segment_order_vert))
		return false;

	det_request_size(input->surface_size.height, input->surface_size.width,
			bpe, &req128_horz_wc, &req128_vert_wc);

	if (!req128_horz_wc && !req128_vert_wc) {
		dcc_control = dcc_control__256_256_xxx;
	} else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
		if (!req128_horz_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_horz == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else if (input->scan == SCAN_DIRECTION_VERTICAL) {
		if (!req128_vert_wc)
			dcc_control = dcc_control__256_256_xxx;
		else if (segment_order_vert == segment_order__contiguous)
			dcc_control = dcc_control__128_128_xxx;
		else
			dcc_control = dcc_control__256_64_64;
	} else {
		if ((req128_horz_wc &&
			segment_order_horz == segment_order__non_contiguous) ||
			(req128_vert_wc &&
			segment_order_vert == segment_order__non_contiguous))
			/* access_dir not known, must use most constraining */
			dcc_control = dcc_control__256_64_64;
		else
			/* req128 is true for either horz or vert,
			 * but segment_order is contiguous
			 */
			dcc_control = dcc_control__128_128_xxx;
	}

	if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
		dcc_control != dcc_control__256_256_xxx)
		return false;

	switch (dcc_control) {
	case dcc_control__256_256_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 256;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__128_128_xxx:
		output->grph.rgb.max_uncompressed_blk_size = 128;
		output->grph.rgb.max_compressed_blk_size = 128;
		output->grph.rgb.independent_64b_blks = false;
		break;
	case dcc_control__256_64_64:
		output->grph.rgb.max_uncompressed_blk_size = 256;
		output->grph.rgb.max_compressed_blk_size = 64;
		output->grph.rgb.independent_64b_blks = true;
		break;
	}

	output->capable = true;
	output->const_color_support = false;

	return true;
}


static void dcn10_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);

	destruct(dcn10_pool);
	kfree(dcn10_pool);
	*pool = NULL;
}

static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
			&& caps->max_video_width != 0
			&& plane_state->src_rect.width > caps->max_video_width)
		return DC_FAIL_SURFACE_VALIDATE;

	return DC_OK;
}

static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = get_dcc_compression_cap
};

static struct resource_funcs dcn10_res_pool_funcs = {
	.destroy = dcn10_destroy_resource_pool,
	.link_enc_create = dcn10_link_encoder_create,
	.validate_guaranteed = dcn10_validate_guaranteed,
	.validate_bandwidth = dcn_validate_bandwidth,
	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
	.validate_plane = dcn10_validate_plane,
	.add_stream_to_ctx = dcn10_add_stream_to_ctx
};

static uint32_t read_pipe_fuses(struct dc_context *ctx)
{
	uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
	/* RV1 supports a max of 4 pipes */
	value = value & 0xf;
	return value;
}

static bool construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn10_resource_pool *pool)
{
	int i;
	int j;
	struct dc_context *ctx = dc->ctx;
	uint32_t pipe_fuses = read_pipe_fuses(ctx);

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap;
	pool->base.funcs = &dcn10_res_pool_funcs;

	/*
	 * TODO fill in from actual raven resource when we create
	 * more than the virtual encoder
	 */

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;

	/* max pipe num for ASIC before checking pipe fuses */
	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;

	dc->caps.max_video_width = 3840;
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.max_slave_planes = 1;
	dc->caps.is_apu = true;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;
	else
		dc->debug = debug_defaults_diags;

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);

	pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;

	pool->base.dp_clock_source =
			dcn10_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				/* todo: not reuse phy_pll registers */
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto fail;
		}
	}

	pool->base.display_clock = dce120_disp_clk_create(ctx);
	if (pool->base.display_clock == NULL) {
		dm_error("DC: failed to create display clock!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	pool->base.dmcu = dcn10_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
	memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
	memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));

	if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
		dc->dcn_soc->urgent_latency = 3;
		dc->debug.disable_dmcu = true;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
	}


	dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
	ASSERT(dc->dcn_soc->number_of_channels < 3);
	if (dc->dcn_soc->number_of_channels == 0) /* old sbios bug */
		dc->dcn_soc->number_of_channels = 2;

	if (dc->dcn_soc->number_of_channels == 1) {
		dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
		dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
		if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
			dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
		}
	}

	pool->base.pp_smu = dcn10_pp_smu_create(ctx);

	if (!dc->debug.disable_pplib_clock_request)
		dcn_bw_update_from_pplib(dc);
	dcn_bw_sync_calcs_and_dml(dc);
	if (!dc->debug.disable_pplib_wm_range) {
		dc->res_pool = &pool->base;
		dcn_bw_notify_pplib_of_wm_ranges(dc);
	}

	{
		struct irq_service_init_data init_data;
		init_data.ctx = dc->ctx;
		pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
		if (!pool->base.irqs)
			goto fail;
	}

	/* index to valid pipe resource */
	j = 0;
	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		/* if pipe is disabled, skip instance of HW pipe,
		 * i.e., skip ASIC register instance
		 */
		if ((pipe_fuses & (1 << i)) != 0)
			continue;

		pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
		if (pool->base.hubps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto fail;
		}

		pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
		if (pool->base.ipps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto fail;
		}

		pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
		if (pool->base.dpps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpp!\n");
			goto fail;
		}

		pool->base.opps[j] = dcn10_opp_create(ctx, i);
		if (pool->base.opps[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto fail;
		}

		pool->base.timing_generators[j] = dcn10_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[j] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto fail;
		}

		/* advance to the next valid pipe slot */
		j++;
	}

	/* valid pipe num */
	pool->base.pipe_count = j;
	pool->base.timing_generator_count = j;

	/* within the dml lib, this is hard coded to 4; if ASIC pipes are
	 * fused, the value may change
	 */
	dc->dml.ip.max_num_dpp = pool->base.pipe_count;
	dc->dcn_ip->max_num_dpp = pool->base.pipe_count;

	pool->base.mpc = dcn10_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto fail;
	}

	pool->base.hubbub = dcn10_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto fail;

	dcn10_hw_sequencer_construct(dc);
	dc->caps.max_planes = pool->base.pipe_count;

	dc->cap_funcs = cap_funcs;

	return true;

fail:

	destruct(pool);

	return false;
}

struct resource_pool *dcn10_create_resource_pool(
		uint8_t num_virtual_links,
		struct dc *dc)
{
	struct dcn10_resource_pool *pool =
		kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (construct(num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	return NULL;
}