/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dm_services.h"
#include "dc.h"

#include "dcn30_init.h"

#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"

#include "dcn30_resource.h"

#include "dcn10/dcn10_ipp.h"
#include "dcn30/dcn30_hubbub.h"
#include "dcn30/dcn30_mpc.h"
#include "dcn30/dcn30_hubp.h"
#include "irq/dcn30/irq_service_dcn30.h"
#include "dcn30/dcn30_dpp.h"
#include "dcn30/dcn30_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn30/dcn30_opp.h"
#include "dcn20/dcn20_dsc.h"
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn30/dcn30_dio_link_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dce/dce_panel_cntl.h"

#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"

#include "sienna_cichlid_ip_offset.h"
#include "dcn/dcn_3_0_0_offset.h"
#include "dcn/dcn_3_0_0_sh_mask.h"

#include "nbio/nbio_7_4_offset.h"

#include "dcn/dpcs_3_0_0_offset.h"
#include "dcn/dpcs_3_0_0_sh_mask.h"

#include "mmhub/mmhub_2_0_0_offset.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"

#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"

#include "dml/dcn30/display_mode_vba_30.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "amdgpu_socbb.h"

#define DC_LOGGER_INIT(logger)

struct _vcs_dpi_ip_params_st dcn3_0_ip = {
	.use_min_dcfclk = 1,
	.clamp_min_dcfclk = 0,
	.odm_capable = 1,
	.gpuvm_enable = 0,
	.hostvm_enable = 0,
	.gpuvm_max_page_table_levels = 4,
	.hostvm_max_page_table_levels = 4,
	.hostvm_cached_page_table_levels = 0,
	.pte_group_size_bytes = 2048,
	.num_dsc = 6,
	.rob_buffer_size_kbytes = 184,
	.det_buffer_size_kbytes = 184,
	.dpte_buffer_size_in_pte_reqs_luma = 84,
	.pde_proc_buffer_size_64k_reqs = 48,
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.pte_enable = 1,
	.max_page_table_levels = 2,
	.pte_chunk_size_kbytes = 2, // ?
	.meta_chunk_size_kbytes = 2,
	.writeback_chunk_size_kbytes = 8,
	.line_buffer_size_bits = 789504,
	.is_line_buffer_bpp_fixed = 0, // ?
	.line_buffer_fixed_bpp = 0, // ?
	.dcc_supported = true,
	.writeback_interface_buffer_size_kbytes = 90,
	.writeback_line_buffer_buffer_size = 0,
	.max_line_buffer_lines = 12,
	.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
	.writeback_chroma_buffer_size_kbytes = 8,
	.writeback_chroma_line_buffer_width_pixels = 4,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_taps = 1,
	.writeback_max_vscl_taps = 1,
	.writeback_line_buffer_luma_buffer_size = 0,
	.writeback_line_buffer_chroma_buffer_size = 14643,
	.cursor_buffer_size = 8,
	.cursor_chunk_size = 2,
	.max_num_otg = 6,
	.max_num_dpp = 6,
	.max_num_wb = 1,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 6,
	.max_vscl_ratio = 6,
	.hscl_mults = 4,
	.vscl_mults = 4,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dispclk_ramp_margin_percent = 1,
	.underscan_factor = 1.11,
	.min_vblank_lines = 32,
	.dppclk_delay_subtotal = 46,
	.dynamic_metadata_vm_enabled = true,
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_scl = 50,
	.dppclk_delay_cnvc_formatter = 27,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 119,
	.dcfclk_cstate_latency = 5.2, // SRExitTime
	.max_inter_dcn_tile_repeaters = 8,
	.odm_combine_4to1_supported = true,

	.xfc_supported = false,
	.xfc_fill_bw_overhead_percent = 10.0,
	.xfc_fill_constant_bytes = 0,
	.gfx7_compat_tiling_supported = 0,
	.number_of_cursors = 1,
};

struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
	.clock_limits = {
		{
			.state = 0,
			.dispclk_mhz = 562.0,
			.dppclk_mhz = 300.0,
			.phyclk_mhz = 300.0,
			.phyclk_d18_mhz = 667.0,
			.dscclk_mhz = 405.6,
		},
	},
	.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
	.num_states = 1,
	.sr_exit_time_us = 12,
	.sr_enter_plus_exit_time_us = 20,
	.urgent_latency_us = 4.0,
	.urgent_latency_pixel_data_only_us = 4.0,
	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
	.urgent_latency_vm_data_only_us = 4.0,
	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
	.max_avg_sdp_bw_use_normal_percent = 60.0,
	.max_avg_dram_bw_use_normal_percent = 40.0,
	.writeback_latency_us = 12.0,
	.max_request_size_bytes = 256,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.dcn_downspread_percent = 0.5,
	.downspread_percent = 0.38,
	.dram_page_open_time_ns = 50.0,
	.dram_rw_turnaround_time_ns = 17.5,
	.dram_return_buffer_per_channel_bytes = 8192,
	.round_trip_ping_latency_dcfclk_cycles = 191,
	.urgent_out_of_order_return_per_channel_bytes = 4096,
	.channel_interleave_bytes = 256,
	.num_banks = 8,
	.gpuvm_min_page_size_bytes = 4096,
	.hostvm_min_page_size_bytes = 4096,
	.dram_clock_change_latency_us = 404,
	.dummy_pstate_latency_us = 5,
	.writeback_dram_clock_change_latency_us = 23.0,
	.return_bus_width_bytes = 64,
	.dispclk_dppclk_vco_speed_mhz = 3650,
	.xfc_bus_transport_time_us = 20, // ?
	.xfc_xbuf_latency_tolerance_us = 4, // ?
	.use_urgent_burst_bw = 1, // ?
	.do_urgent_latency_adjustment = true,
	.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
	.urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
};

enum dcn30_clk_src_array_id {
	DCN30_CLK_SRC_PLL0,
	DCN30_CLK_SRC_PLL1,
	DCN30_CLK_SRC_PLL2,
	DCN30_CLK_SRC_PLL3,
	DCN30_CLK_SRC_PLL4,
	DCN30_CLK_SRC_PLL5,
	DCN30_CLK_SRC_TOTAL
};

/* begin *********************
 * macros to expand register list macro defined in HW object header file
 */

/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg

#define BASE(seg) BASE_INNER(seg)

#define SR(reg_name)\
	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define SRI(reg_name, block, id)\
	.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRI2(reg_name, block, id)\
	.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

#define SRIR(var_name, reg_name, block, id)\
	.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRII_MPC_RMU(reg_name, block, id)\
	.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define SRII_DWB(reg_name, temp_name, block, id)\
	.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## temp_name

#define DCCG_SRII(reg_name, block, id)\
	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## id ## _ ## reg_name

#define VUPDATE_SRII(reg_name, block, id)\
	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
					mm ## reg_name ## _ ## block ## id

/* NBIO */
#define NBIO_BASE_INNER(seg) \
	NBIO_BASE__INST0_SEG ## seg

#define NBIO_BASE(seg) \
	NBIO_BASE_INNER(seg)

#define NBIO_SR(reg_name)\
	.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
					mm ## reg_name

/* MMHUB */
#define MMHUB_BASE_INNER(seg) \
	MMHUB_BASE__INST0_SEG ## seg

#define MMHUB_BASE(seg) \
	MMHUB_BASE_INNER(seg)

#define MMHUB_SR(reg_name)\
	.reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
					mmMM ## reg_name

/* CLOCK */
#define CLK_BASE_INNER(seg) \
	CLK_BASE__INST0_SEG ## seg

#define CLK_BASE(seg) \
	CLK_BASE_INNER(seg)

#define CLK_SRI(reg_name, block, inst)\
	.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
					mm ## block ## _ ## inst ## _ ## reg_name


static const struct bios_registers bios_regs = {
	NBIO_SR(BIOS_SCRATCH_3),
	NBIO_SR(BIOS_SCRATCH_6)
};
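
/*
 * Note for readers (illustrative expansion, not generated code): with the
 * macros above, a register-list entry such as SRI(OTG_CONTROL, OTG, 0)
 * evaluates to
 *
 *	.OTG_CONTROL = BASE(mmOTG0_OTG_CONTROL_BASE_IDX) + mmOTG0_OTG_CONTROL
 *
 * where BASE() resolves the _BASE_IDX constant to the matching
 * DCN_BASE__INST0_SEG<n> segment address, i.e. each entry is the segment base
 * plus the register offset from dcn_3_0_0_offset.h. The concrete register
 * name is only an example of the pattern.
 */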
#define clk_src_regs(index, pllid)\
[index] = {\
	CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
}

static const struct dce110_clk_src_regs clk_src_regs[] = {
	clk_src_regs(0, A),
	clk_src_regs(1, B),
	clk_src_regs(2, C),
	clk_src_regs(3, D),
	clk_src_regs(4, E),
	clk_src_regs(5, F)
};

static const struct dce110_clk_src_shift cs_shift = {
	CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
};

static const struct dce110_clk_src_mask cs_mask = {
	CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
};

#define abm_regs(id)\
[id] = {\
	ABM_DCN301_REG_LIST(id)\
}

static const struct dce_abm_registers abm_regs[] = {
	abm_regs(0),
	abm_regs(1),
	abm_regs(2),
	abm_regs(3),
	abm_regs(4),
	abm_regs(5),
};

static const struct dce_abm_shift abm_shift = {
	ABM_MASK_SH_LIST_DCN301(__SHIFT)
};

static const struct dce_abm_mask abm_mask = {
	ABM_MASK_SH_LIST_DCN301(_MASK)
};



#define audio_regs(id)\
[id] = {\
	AUD_COMMON_REG_LIST(id)\
}

static const struct dce_audio_registers audio_regs[] = {
	audio_regs(0),
	audio_regs(1),
	audio_regs(2),
	audio_regs(3),
	audio_regs(4),
	audio_regs(5),
	audio_regs(6)
};

#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
	SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
	AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)

static const struct dce_audio_shift audio_shift = {
	DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
};

static const struct dce_audio_mask audio_mask = {
	DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};

#define vpg_regs(id)\
[id] = {\
	VPG_DCN3_REG_LIST(id)\
}

static const struct dcn30_vpg_registers vpg_regs[] = {
	vpg_regs(0),
	vpg_regs(1),
	vpg_regs(2),
	vpg_regs(3),
	vpg_regs(4),
	vpg_regs(5),
	vpg_regs(6),
};

static const struct dcn30_vpg_shift vpg_shift = {
	DCN3_VPG_MASK_SH_LIST(__SHIFT)
};

static const struct dcn30_vpg_mask vpg_mask = {
	DCN3_VPG_MASK_SH_LIST(_MASK)
};

#define afmt_regs(id)\
[id] = {\
	AFMT_DCN3_REG_LIST(id)\
}

static const struct dcn30_afmt_registers afmt_regs[] = {
	afmt_regs(0),
	afmt_regs(1),
	afmt_regs(2),
	afmt_regs(3),
	afmt_regs(4),
	afmt_regs(5),
	afmt_regs(6),
};

static const struct dcn30_afmt_shift afmt_shift = {
	DCN3_AFMT_MASK_SH_LIST(__SHIFT)
};

static const struct dcn30_afmt_mask afmt_mask = {
	DCN3_AFMT_MASK_SH_LIST(_MASK)
};

#define stream_enc_regs(id)\
[id] = {\
	SE_DCN3_REG_LIST(id)\
}

static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
	stream_enc_regs(0),
	stream_enc_regs(1),
	stream_enc_regs(2),
	stream_enc_regs(3),
	stream_enc_regs(4),
	stream_enc_regs(5)
};

static const struct dcn10_stream_encoder_shift se_shift = {
	SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn10_stream_encoder_mask se_mask = {
	SE_COMMON_MASK_SH_LIST_DCN30(_MASK)
};


#define aux_regs(id)\
[id] = {\
	DCN2_AUX_REG_LIST(id)\
}

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
	aux_regs(0),
	aux_regs(1),
	aux_regs(2),
	aux_regs(3),
	aux_regs(4),
	aux_regs(5)
};

#define hpd_regs(id)\
[id] = {\
	HPD_REG_LIST(id)\
}

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
	hpd_regs(0),
	hpd_regs(1),
	hpd_regs(2),
	hpd_regs(3),
	hpd_regs(4),
	hpd_regs(5)
};

#define link_regs(id, phyid)\
[id] = {\
	LE_DCN3_REG_LIST(id), \
	UNIPHY_DCN2_REG_LIST(phyid), \
}

static const struct dce110_aux_registers_shift aux_shift = {
	DCN_AUX_MASK_SH_LIST(__SHIFT)
};

static const struct dce110_aux_registers_mask aux_mask = {
	DCN_AUX_MASK_SH_LIST(_MASK)
};

static const struct dcn10_link_enc_registers link_enc_regs[] = {
	link_regs(0, A),
	link_regs(1, B),
	link_regs(2, C),
	link_regs(3, D),
	link_regs(4, E),
	link_regs(5, F)
};

static const struct dcn10_link_enc_shift le_shift = {
	LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\
	DPCS_DCN2_MASK_SH_LIST(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
	LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\
	DPCS_DCN2_MASK_SH_LIST(_MASK)
};


static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
	{ DCN_PANEL_CNTL_REG_LIST() }
};

static const struct dce_panel_cntl_shift panel_cntl_shift = {
	DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
};

static const struct dce_panel_cntl_mask panel_cntl_mask = {
	DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
};

#define dpp_regs(id)\
[id] = {\
	DPP_REG_LIST_DCN30(id),\
}

static const struct dcn3_dpp_registers dpp_regs[] = {
	dpp_regs(0),
	dpp_regs(1),
	dpp_regs(2),
	dpp_regs(3),
	dpp_regs(4),
	dpp_regs(5),
};

static const struct dcn3_dpp_shift tf_shift = {
	DPP_REG_LIST_SH_MASK_DCN30(__SHIFT)
};

static const struct dcn3_dpp_mask tf_mask = {
	DPP_REG_LIST_SH_MASK_DCN30(_MASK)
};

#define opp_regs(id)\
[id] = {\
	OPP_REG_LIST_DCN30(id),\
}

static const struct dcn20_opp_registers opp_regs[] = {
	opp_regs(0),
	opp_regs(1),
	opp_regs(2),
	opp_regs(3),
	opp_regs(4),
	opp_regs(5)
};

static const struct dcn20_opp_shift opp_shift = {
	OPP_MASK_SH_LIST_DCN20(__SHIFT)
};

static const struct dcn20_opp_mask opp_mask = {
	OPP_MASK_SH_LIST_DCN20(_MASK)
};

#define aux_engine_regs(id)\
[id] = {\
	AUX_COMMON_REG_LIST0(id), \
	.AUXN_IMPCAL = 0, \
	.AUXP_IMPCAL = 0, \
	.AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
}

static const struct dce110_aux_registers aux_engine_regs[] = {
	aux_engine_regs(0),
	aux_engine_regs(1),
	aux_engine_regs(2),
	aux_engine_regs(3),
	aux_engine_regs(4),
	aux_engine_regs(5)
};

#define dwbc_regs_dcn3(id)\
[id] = {\
	DWBC_COMMON_REG_LIST_DCN30(id),\
}

static const struct dcn30_dwbc_registers dwbc30_regs[] = {
	dwbc_regs_dcn3(0),
};

static const struct dcn30_dwbc_shift dwbc30_shift = {
	DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn30_dwbc_mask dwbc30_mask = {
	DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

#define mcif_wb_regs_dcn3(id)\
[id] = {\
	MCIF_WB_COMMON_REG_LIST_DCN30(id),\
}

static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
	mcif_wb_regs_dcn3(0)
};

static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
	MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

#define dsc_regsDCN20(id)\
[id] = {\
	DSC_REG_LIST_DCN20(id)\
}

static const struct dcn20_dsc_registers dsc_regs[] = {
	dsc_regsDCN20(0),
	dsc_regsDCN20(1),
	dsc_regsDCN20(2),
	dsc_regsDCN20(3),
	dsc_regsDCN20(4),
	dsc_regsDCN20(5)
};

static const struct dcn20_dsc_shift dsc_shift = {
	DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};

static const struct dcn20_dsc_mask dsc_mask = {
	DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};

static const struct dcn30_mpc_registers mpc_regs = {
	MPC_REG_LIST_DCN3_0(0),
	MPC_REG_LIST_DCN3_0(1),
	MPC_REG_LIST_DCN3_0(2),
	MPC_REG_LIST_DCN3_0(3),
	MPC_REG_LIST_DCN3_0(4),
	MPC_REG_LIST_DCN3_0(5),
	MPC_OUT_MUX_REG_LIST_DCN3_0(0),
	MPC_OUT_MUX_REG_LIST_DCN3_0(1),
	MPC_OUT_MUX_REG_LIST_DCN3_0(2),
	MPC_OUT_MUX_REG_LIST_DCN3_0(3),
	MPC_OUT_MUX_REG_LIST_DCN3_0(4),
	MPC_OUT_MUX_REG_LIST_DCN3_0(5),
	MPC_RMU_GLOBAL_REG_LIST_DCN3AG,
	MPC_RMU_REG_LIST_DCN3AG(0),
	MPC_RMU_REG_LIST_DCN3AG(1),
	MPC_RMU_REG_LIST_DCN3AG(2),
	MPC_DWB_MUX_REG_LIST_DCN3_0(0),
};

static const struct dcn30_mpc_shift mpc_shift = {
	MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn30_mpc_mask mpc_mask = {
	MPC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

#define optc_regs(id)\
	[id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)}


static const struct dcn_optc_registers optc_regs[] = {
	optc_regs(0),
	optc_regs(1),
	optc_regs(2),
	optc_regs(3),
	optc_regs(4),
	optc_regs(5)
};

static const struct dcn_optc_shift optc_shift = {
	OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn_optc_mask optc_mask = {
	OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};

#define hubp_regs(id)\
[id] = {\
	HUBP_REG_LIST_DCN30(id)\
}

static const struct dcn_hubp2_registers hubp_regs[] = {
	hubp_regs(0),
	hubp_regs(1),
	hubp_regs(2),
	hubp_regs(3),
	hubp_regs(4),
	hubp_regs(5)
};

static const struct dcn_hubp2_shift hubp_shift = {
	HUBP_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn_hubp2_mask hubp_mask = {
	HUBP_MASK_SH_LIST_DCN30(_MASK)
};

static const struct dcn_hubbub_registers hubbub_reg = {
	HUBBUB_REG_LIST_DCN30(0)
};

static const struct dcn_hubbub_shift hubbub_shift = {
	HUBBUB_MASK_SH_LIST_DCN30(__SHIFT)
};

static const struct dcn_hubbub_mask hubbub_mask = {
	HUBBUB_MASK_SH_LIST_DCN30(_MASK)
};

static const struct dccg_registers dccg_regs = {
	DCCG_REG_LIST_DCN30()
};

static const struct dccg_shift dccg_shift = {
	DCCG_MASK_SH_LIST_DCN3(__SHIFT)
};

static const struct dccg_mask dccg_mask = {
	DCCG_MASK_SH_LIST_DCN3(_MASK)
};

static const struct dce_hwseq_registers hwseq_reg = {
	HWSEQ_DCN30_REG_LIST()
};

static const struct dce_hwseq_shift hwseq_shift = {
	HWSEQ_DCN30_MASK_SH_LIST(__SHIFT)
};

static const struct dce_hwseq_mask hwseq_mask = {
	HWSEQ_DCN30_MASK_SH_LIST(_MASK)
};

#define vmid_regs(id)\
[id] = {\
	DCN20_VMID_REG_LIST(id)\
}

static const struct dcn_vmid_registers vmid_regs[] = {
	vmid_regs(0),
	vmid_regs(1),
	vmid_regs(2),
	vmid_regs(3),
	vmid_regs(4),
	vmid_regs(5),
	vmid_regs(6),
	vmid_regs(7),
	vmid_regs(8),
	vmid_regs(9),
	vmid_regs(10),
	vmid_regs(11),
	vmid_regs(12),
	vmid_regs(13),
	vmid_regs(14),
	vmid_regs(15)
};

static const struct dcn20_vmid_shift vmid_shifts = {
	DCN20_VMID_MASK_SH_LIST(__SHIFT)
};

static const struct dcn20_vmid_mask vmid_masks = {
	DCN20_VMID_MASK_SH_LIST(_MASK)
};

static const struct resource_caps res_cap_dcn3 = {
	.num_timing_generator = 6,
	.num_opp = 6,
	.num_video_plane = 6,
	.num_audio = 6,
	.num_stream_encoder = 6,
	.num_pll = 6,
	.num_dwb = 1,
	.num_ddc = 6,
	.num_vmid = 16,
	.num_mpc_3dlut = 3,
	.num_dsc = 6,
};

static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.blends_with_above = true,
	.blends_with_below = true,
	.per_pixel_alpha = true,

	.pixel_format_support = {
		.argb8888 = true,
		.nv12 = true,
		.fp16 = true,
		.p010 = false,
		.ayuv = false,
	},

	.max_upscale_factor = {
		.argb8888 = 16000,
		.nv12 = 16000,
		.fp16 = 16000
	},

	.max_downscale_factor = {
		.argb8888 = 600,
		.nv12 = 600,
		.fp16 = 600
	}
};

static const struct dc_debug_options debug_defaults_drv = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = false,
	.clock_trace = true,
	.disable_pplib_clock_request = true,
	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
	.force_single_disp_pipe_split = false,
	.disable_dcc = DCC_ENABLE,
	.vsr_support = true,
	.performance_trace = false,
	.max_downscale_src_width = 7680, /* up to 8K */
	.disable_pplib_wm_range = false,
	.scl_reset_length10 = true,
	.sanity_checks = false,
	.underflow_assert_delay_us = 0xFFFFFFFF,
	.dwb_fi_phase = -1, // -1 = disable,
	.dmub_command_table = true,
};

static const struct dc_debug_options debug_defaults_diags = {
	.disable_dmcu = true,
	.force_abm_enable = false,
	.timing_trace = true,
	.clock_trace = true,
	.disable_dpp_power_gate = true,
	.disable_hubp_power_gate = true,
	.disable_clock_gate = true,
	.disable_pplib_clock_request = true,
	.disable_pplib_wm_range = true,
	.disable_stutter = false,
	.scl_reset_length10 = true,
	.dwb_fi_phase = -1, // -1 = disable
	.dmub_command_table = true,
};

void dcn30_dpp_destroy(struct dpp **dpp)
{
	kfree(TO_DCN20_DPP(*dpp));
	*dpp = NULL;
}

struct dpp *dcn30_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn3_dpp *dpp =
		kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL);

	if (!dpp)
		return NULL;

	if (dpp3_construct(dpp, ctx, inst,
			&dpp_regs[inst], &tf_shift, &tf_mask))
		return &dpp->base;

	BREAK_TO_DEBUGGER();
	kfree(dpp);
	return NULL;
}

struct output_pixel_processor *dcn30_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_opp *opp =
		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);

	if (!opp) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn20_opp_construct(opp, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);
	return &opp->base;
}

struct dce_aux *dcn30_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *aux_engine =
		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);

	if (!aux_engine)
		return NULL;

	dce110_aux_engine_construct(aux_engine, ctx, inst,
			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
			&aux_engine_regs[inst],
			&aux_mask,
			&aux_shift,
			ctx->dc->caps.extended_aux_timeout_support);

	return &aux_engine->base;
}

#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }

static const struct dce_i2c_registers i2c_hw_regs[] = {
	i2c_inst_regs(1),
	i2c_inst_regs(2),
	i2c_inst_regs(3),
	i2c_inst_regs(4),
	i2c_inst_regs(5),
	i2c_inst_regs(6),
};

static const struct dce_i2c_shift i2c_shifts = {
	I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
};

static const struct dce_i2c_mask i2c_masks = {
	I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
};

struct dce_i2c_hw *dcn30_i2c_hw_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_i2c_hw *dce_i2c_hw =
		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);

	if (!dce_i2c_hw)
		return NULL;

	dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
			&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

	return dce_i2c_hw;
}

static struct mpc *dcn30_mpc_create(
		struct dc_context *ctx,
		int num_mpcc,
		int num_rmu)
{
	struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc),
					  GFP_KERNEL);

	if (!mpc30)
		return NULL;

	dcn30_mpc_construct(mpc30, ctx,
			&mpc_regs,
			&mpc_shift,
			&mpc_mask,
			num_mpcc,
			num_rmu);

	return &mpc30->base;
}

struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)
{
	int i;

	struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub),
					       GFP_KERNEL);

	if (!hubbub3)
		return NULL;

	hubbub3_construct(hubbub3, ctx,
			&hubbub_reg,
			&hubbub_shift,
			&hubbub_mask);


	for (i = 0; i < res_cap_dcn3.num_vmid; i++) {
		struct dcn20_vmid *vmid = &hubbub3->vmid[i];

		vmid->ctx = ctx;

		vmid->regs = &vmid_regs[i];
		vmid->shifts = &vmid_shifts;
		vmid->masks = &vmid_masks;
	}

	return &hubbub3->base;
}

struct timing_generator *dcn30_timing_generator_create(
		struct dc_context *ctx,
		uint32_t instance)
{
	struct optc *tgn10 =
		kzalloc(sizeof(struct optc), GFP_KERNEL);

	if (!tgn10)
		return NULL;

	tgn10->base.inst = instance;
	tgn10->base.ctx = ctx;

	tgn10->tg_regs = &optc_regs[instance];
	tgn10->tg_shift = &optc_shift;
	tgn10->tg_mask = &optc_mask;

	dcn30_timing_generator_init(tgn10);

	return &tgn10->base;
}

static const struct encoder_feature_support link_enc_feature = {
	.max_hdmi_deep_color = COLOR_DEPTH_121212,
	.max_hdmi_pixel_clock = 600000,
	.hdmi_ycbcr420_supported = true,
	.dp_ycbcr420_supported = true,
	.fec_supported = true,
	.flags.bits.IS_HBR2_CAPABLE = true,
	.flags.bits.IS_HBR3_CAPABLE = true,
	.flags.bits.IS_TPS3_CAPABLE = true,
	.flags.bits.IS_TPS4_CAPABLE = true
};

struct link_encoder *dcn30_link_encoder_create(
	const struct encoder_init_data *enc_init_data)
{
	struct dcn20_link_encoder *enc20 =
		kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);

	if (!enc20)
		return NULL;

	dcn30_link_encoder_construct(enc20,
			enc_init_data,
			&link_enc_feature,
			&link_enc_regs[enc_init_data->transmitter],
			&link_enc_aux_regs[enc_init_data->channel - 1],
			&link_enc_hpd_regs[enc_init_data->hpd_source],
			&le_shift,
			&le_mask);

	return &enc20->enc10.base;
}

struct panel_cntl *dcn30_panel_cntl_create(const struct panel_cntl_init_data *init_data)
{
	struct dce_panel_cntl *panel_cntl =
		kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);

	if (!panel_cntl)
		return NULL;

	dce_panel_cntl_construct(panel_cntl,
			init_data,
			&panel_cntl_regs[init_data->inst],
			&panel_cntl_shift,
			&panel_cntl_mask);

	return &panel_cntl->base;
}

static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);

}

static struct audio *dcn30_create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}

static struct vpg *dcn30_vpg_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);

	if (!vpg3)
		return NULL;

	vpg3_construct(vpg3, ctx, inst,
			&vpg_regs[inst],
			&vpg_shift,
			&vpg_mask);

	return &vpg3->base;
}

static struct afmt *dcn30_afmt_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);

	if (!afmt3)
		return NULL;

	afmt3_construct(afmt3, ctx, inst,
			&afmt_regs[inst],
			&afmt_shift,
			&afmt_mask);

	return &afmt3->base;
}

struct stream_encoder *dcn30_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn10_stream_encoder *enc1;
	struct vpg *vpg;
	struct afmt *afmt;
	int vpg_inst;
	int afmt_inst;

	/* Mapping of VPG, AFMT, DME register blocks to DIO block instance */
	if (eng_id <= ENGINE_ID_DIGF) {
		vpg_inst = eng_id;
		afmt_inst = eng_id;
	} else
		return NULL;

	enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
	vpg = dcn30_vpg_create(ctx, vpg_inst);
	afmt = dcn30_afmt_create(ctx, afmt_inst);

	if (!enc1 || !vpg || !afmt)
		return NULL;

	dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
					eng_id, vpg, afmt,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);

	return &enc1->base;
}

struct dce_hwseq *dcn30_hwseq_create(
	struct dc_context *ctx)
{
	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);

	if (hws) {
		hws->ctx = ctx;
		hws->regs = &hwseq_reg;
		hws->shifts = &hwseq_shift;
		hws->masks = &hwseq_mask;
	}
	return hws;
}

static const struct resource_create_funcs res_create_funcs = {
	.read_dce_straps = read_dce_straps,
	.create_audio = dcn30_create_audio,
	.create_stream_encoder = dcn30_stream_encoder_create,
	.create_hwseq = dcn30_hwseq_create,
};

static const struct resource_create_funcs res_create_maximus_funcs = {
	.read_dce_straps = NULL,
	.create_audio = NULL,
	.create_stream_encoder = NULL,
	.create_hwseq = dcn30_hwseq_create,
};

static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
{
	unsigned int i;

	for (i = 0; i < pool->base.stream_enc_count; i++) {
		if (pool->base.stream_enc[i] != NULL) {
			if (pool->base.stream_enc[i]->vpg != NULL) {
				kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg));
				pool->base.stream_enc[i]->vpg = NULL;
			}
			if (pool->base.stream_enc[i]->afmt != NULL) {
				kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt));
				pool->base.stream_enc[i]->afmt = NULL;
			}
			kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
			pool->base.stream_enc[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		if (pool->base.dscs[i] != NULL)
			dcn20_dsc_destroy(&pool->base.dscs[i]);
	}

	if (pool->base.mpc != NULL) {
		kfree(TO_DCN20_MPC(pool->base.mpc));
		pool->base.mpc = NULL;
	}
	if (pool->base.hubbub != NULL) {
		kfree(pool->base.hubbub);
		pool->base.hubbub = NULL;
	}
	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.dpps[i] != NULL)
			dcn30_dpp_destroy(&pool->base.dpps[i]);

		if (pool->base.ipps[i] != NULL)
			pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);

		if (pool->base.hubps[i] != NULL) {
			kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
			pool->base.hubps[i] = NULL;
		}

		if (pool->base.irqs != NULL) {
			dal_irq_service_destroy(&pool->base.irqs);
		}
	}

	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		if (pool->base.engines[i] != NULL)
			dce110_engine_destroy(&pool->base.engines[i]);
		if (pool->base.hw_i2cs[i] != NULL) {
			kfree(pool->base.hw_i2cs[i]);
			pool->base.hw_i2cs[i] = NULL;
		}
		if (pool->base.sw_i2cs[i] != NULL) {
			kfree(pool->base.sw_i2cs[i]);
			pool->base.sw_i2cs[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		if (pool->base.opps[i] != NULL)
			pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		if (pool->base.timing_generators[i] != NULL) {
			kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
			pool->base.timing_generators[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
		if (pool->base.dwbc[i] != NULL) {
			kfree(TO_DCN30_DWBC(pool->base.dwbc[i]));
			pool->base.dwbc[i] = NULL;
		}
		if (pool->base.mcif_wb[i] != NULL) {
			kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i]));
			pool->base.mcif_wb[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.audio_count; i++) {
		if (pool->base.audios[i])
			dce_aud_destroy(&pool->base.audios[i]);
	}

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] != NULL) {
			dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
			pool->base.clock_sources[i] = NULL;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) {
		if (pool->base.mpc_lut[i] != NULL) {
			dc_3dlut_func_release(pool->base.mpc_lut[i]);
			pool->base.mpc_lut[i] = NULL;
		}
		if (pool->base.mpc_shaper[i] != NULL) {
			dc_transfer_func_release(pool->base.mpc_shaper[i]);
			pool->base.mpc_shaper[i] = NULL;
		}
	}

	if (pool->base.dp_clock_source != NULL) {
		dcn20_clock_source_destroy(&pool->base.dp_clock_source);
		pool->base.dp_clock_source = NULL;
	}

	for (i = 0; i < pool->base.pipe_count; i++) {
		if (pool->base.multiple_abms[i] != NULL)
			dce_abm_destroy(&pool->base.multiple_abms[i]);
	}

	if (pool->base.dccg != NULL)
		dcn_dccg_destroy(&pool->base.dccg);
}

struct hubp *dcn30_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn20_hubp *hubp2 =
		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);

	if (!hubp2)
		return NULL;

	if (hubp3_construct(hubp2, ctx, inst,
			&hubp_regs[inst], &hubp_shift, &hubp_mask))
		return &hubp2->base;

	BREAK_TO_DEBUGGER();
	kfree(hubp2);
	return NULL;
}

bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t pipe_count = pool->res_cap->num_dwb;

	for (i = 0; i < pipe_count; i++) {
		struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc),
						    GFP_KERNEL);

		if (!dwbc30) {
			dm_error("DC: failed to create dwbc30!\n");
			return false;
		}

		dcn30_dwbc_construct(dwbc30, ctx,
				&dwbc30_regs[i],
				&dwbc30_shift,
				&dwbc30_mask,
				i);

		pool->dwbc[i] = &dwbc30->base;
	}
	return true;
}

bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
{
	int i;
	uint32_t pipe_count = pool->res_cap->num_dwb;

	for (i = 0; i < pipe_count; i++) {
		struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub),
							   GFP_KERNEL);

		if (!mcif_wb30) {
			dm_error("DC: failed to create mcif_wb30!\n");
			return false;
		}

		dcn30_mmhubbub_construct(mcif_wb30, ctx,
				&mcif_wb30_regs[i],
				&mcif_wb30_shift,
				&mcif_wb30_mask,
				i);

		pool->mcif_wb[i] = &mcif_wb30->base;
	}
	return true;
}

static struct display_stream_compressor *dcn30_dsc_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_dsc *dsc =
		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);

	if (!dsc) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
	return &dsc->base;
}

enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
{

	return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream);
}

static void dcn30_destroy_resource_pool(struct resource_pool **pool)
{
	struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool);

	dcn30_resource_destruct(dcn30_pool);
	kfree(dcn30_pool);
	*pool = NULL;
}

static struct clock_source *dcn30_clock_source_create(
		struct dc_context *ctx,
		struct dc_bios *bios,
		enum clock_source_id id,
		const struct dce110_clk_src_regs *regs,
		bool dp_clk_src)
{
	struct dce110_clk_src *clk_src =
		kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);

	if (!clk_src)
		return NULL;

	if (dcn3_clk_src_construct(clk_src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		clk_src->base.dp_clk_src = dp_clk_src;
		return &clk_src->base;
	}

	BREAK_TO_DEBUGGER();
	return NULL;
}

int dcn30_populate_dml_pipes_from_context(
	struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;

	dcn20_populate_dml_pipes_from_context(dc, context, pipes);

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth =
			dm_lb_16;
	}

	return pipe_cnt;
}
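
/*
 * Note added for clarity (not in the original source): the helper below walks
 * every active stream and copies its writeback configuration into
 * pipes[].dout for DML. Because the DML computation assumes only one set of
 * writeback parameters per pipe, when several enabled writebacks share the
 * same source plane the loop keeps the configuration requiring the highest
 * writeback DISPCLK as the worst case (see the workaround comment inside the
 * loop).
 */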
void dcn30_populate_dml_writeback_from_context(
	struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
{
	int pipe_cnt, i, j;
	double max_calc_writeback_dispclk;
	double writeback_dispclk;
	struct writeback_st dout_wb;

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream;

		if (!stream)
			continue;
		max_calc_writeback_dispclk = 0;

		/* Set writeback information */
		pipes[pipe_cnt].dout.wb_enable = 0;
		pipes[pipe_cnt].dout.num_active_wb = 0;
		for (j = 0; j < stream->num_wb_info; j++) {
			struct dc_writeback_info *wb_info = &stream->writeback_info[j];

			if (wb_info->wb_enabled && wb_info->writeback_source_plane &&
					(wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) {
				pipes[pipe_cnt].dout.wb_enable = 1;
				pipes[pipe_cnt].dout.num_active_wb++;
				dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ?
					wb_info->dwb_params.cnv_params.crop_height :
					wb_info->dwb_params.cnv_params.src_height;
				dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ?
					wb_info->dwb_params.cnv_params.crop_width :
					wb_info->dwb_params.cnv_params.src_width;
				dout_wb.wb_dst_width = wb_info->dwb_params.dest_width;
				dout_wb.wb_dst_height = wb_info->dwb_params.dest_height;

				/* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */
				if (dc->dml.ip.writeback_max_hscl_taps > 1) {
					dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps;
					dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps;
				} else {
					dout_wb.wb_htaps_luma = 1;
					dout_wb.wb_vtaps_luma = 1;
				}
				dout_wb.wb_htaps_chroma = 0;
				dout_wb.wb_vtaps_chroma = 0;
				dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ?
					(double)wb_info->dwb_params.cnv_params.crop_width /
						(double)wb_info->dwb_params.dest_width :
					(double)wb_info->dwb_params.cnv_params.src_width /
						(double)wb_info->dwb_params.dest_width;
				dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ?
					(double)wb_info->dwb_params.cnv_params.crop_height /
						(double)wb_info->dwb_params.dest_height :
					(double)wb_info->dwb_params.cnv_params.src_height /
						(double)wb_info->dwb_params.dest_height;
				if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
					wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
					dout_wb.wb_pixel_format = dm_444_64;
				else
					dout_wb.wb_pixel_format = dm_444_32;

				/* Workaround for cases where multiple writebacks are connected to same plane
				 * In which case, need to compute worst case and set the associated writeback parameters
				 * This workaround is necessary due to DML computation assuming only 1 set of writeback
				 * parameters per pipe
				 */
				writeback_dispclk = dml30_CalculateWriteBackDISPCLK(
						dout_wb.wb_pixel_format,
						pipes[pipe_cnt].pipe.dest.pixel_rate_mhz,
						dout_wb.wb_hratio,
						dout_wb.wb_vratio,
						dout_wb.wb_htaps_luma,
						dout_wb.wb_vtaps_luma,
						dout_wb.wb_src_width,
						dout_wb.wb_dst_width,
						pipes[pipe_cnt].pipe.dest.htotal,
						dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size);

				if (writeback_dispclk > max_calc_writeback_dispclk) {
					max_calc_writeback_dispclk = writeback_dispclk;
					pipes[pipe_cnt].dout.wb = dout_wb;
				}
			}
		}

		pipe_cnt++;
	}

}

unsigned int dcn30_calc_max_scaled_time(
		unsigned int time_per_pixel,
		enum mmhubbub_wbif_mode mode,
		unsigned int urgent_watermark)
{
	unsigned int time_per_byte = 0;
	unsigned int total_free_entry = 0xb40;
	unsigned int buf_lh_capability;
	unsigned int max_scaled_time;

	if (mode == PACKED_444) /* packed mode 32 bpp */
		time_per_byte = time_per_pixel/4;
	else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */
		time_per_byte = time_per_pixel/8;

	if (time_per_byte == 0)
		time_per_byte = 1;

	buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/
	max_scaled_time = buf_lh_capability - urgent_watermark;
	return max_scaled_time;
}

void dcn30_set_mcif_arb_params(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt)
{
	enum mmhubbub_wbif_mode wbif_mode;
	struct display_mode_lib *dml = &context->bw_ctx.dml;
	struct mcif_arb_params *wb_arb_params;
	int i, j, k, dwb_pipe;

	/* Writeback MCIF_WB arbitration parameters */
	dwb_pipe = 0;
	for (i = 0; i < dc->res_pool->pipe_count; i++) {

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		for (j = 0; j < MAX_DWB_PIPES; j++) {
			struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j];

			if (writeback_info->wb_enabled == false)
				continue;

			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];

			if (writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB ||
				writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA)
				wbif_mode = PACKED_444_FP16;
			else
				wbif_mode = PACKED_444;

			for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
				wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, pipe_cnt) * 1000;
				wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
			}
			wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */
			wb_arb_params->slice_lines = 32;
			wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */
			wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel,
					wbif_mode,
					wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */
			wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */

			dwb_pipe++;

			if (dwb_pipe >= MAX_DWB_PIPES)
				return;
		}
		if (dwb_pipe >= MAX_DWB_PIPES)
			return;
	}

}

static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};

bool dcn30_acquire_post_bldn_3dlut(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		int mpcc_id,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int i;
	bool ret = false;
	union dc_3dlut_state *state;

	ASSERT(*lut == NULL && *shaper == NULL);
	*lut = NULL;
	*shaper = NULL;

	for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
		if (!res_ctx->is_mpc_3dlut_acquired[i]) {
			*lut = pool->mpc_lut[i];
			*shaper = pool->mpc_shaper[i];
			state = &pool->mpc_lut[i]->state;
			res_ctx->is_mpc_3dlut_acquired[i] = true;
			state->bits.rmu_idx_valid = 1;
			state->bits.rmu_mux_num = i;
			if (state->bits.rmu_mux_num == 0)
				state->bits.mpc_rmu0_mux = mpcc_id;
			else if (state->bits.rmu_mux_num == 1)
				state->bits.mpc_rmu1_mux = mpcc_id;
			else if (state->bits.rmu_mux_num == 2)
				state->bits.mpc_rmu2_mux = mpcc_id;
			ret = true;
			break;
		}
	}
	return ret;
}

bool dcn30_release_post_bldn_3dlut(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int i;
	bool ret = false;

	for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) {
		if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) {
			res_ctx->is_mpc_3dlut_acquired[i] = false;
			pool->mpc_lut[i]->state.raw = 0;
			*lut = NULL;
			*shaper = NULL;
			ret = true;
			break;
		}
	}
	return ret;
}

#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))

static bool is_soc_bounding_box_valid(struct dc *dc)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;

	if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev))
		return true;

	return false;
}

static bool init_soc_bounding_box(struct dc *dc,
				  struct dcn30_resource_pool *pool)
{
	const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box;
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc;
	struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!bb && !is_soc_bounding_box_valid(dc)) {
		DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__);
		return false;
	}

	if (bb && !is_soc_bounding_box_valid(dc)) {
		int i;

		dcn3_0_soc.sr_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_exit_time_us);
		dcn3_0_soc.sr_enter_plus_exit_time_us =
				fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us);
		dcn3_0_soc.urgent_latency_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_us);
		dcn3_0_soc.urgent_latency_pixel_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us);
		dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us);
		dcn3_0_soc.urgent_latency_vm_data_only_us =
				fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us);
		dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes);
		dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes);
		dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes);
		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only);
		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm);
		dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only =
				fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only);
		dcn3_0_soc.max_avg_sdp_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent);
		dcn3_0_soc.max_avg_dram_bw_use_normal_percent =
				fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent);
		dcn3_0_soc.writeback_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_latency_us);
		dcn3_0_soc.ideal_dram_bw_after_urgent_percent =
				fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent);
		dcn3_0_soc.max_request_size_bytes =
				le32_to_cpu(bb->max_request_size_bytes);
		dcn3_0_soc.dram_channel_width_bytes =
				le32_to_cpu(bb->dram_channel_width_bytes);
		dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes =
				le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes);
		dcn3_0_soc.dcn_downspread_percent =
				fixed16_to_double_to_cpu(bb->dcn_downspread_percent);
		dcn3_0_soc.downspread_percent =
				fixed16_to_double_to_cpu(bb->downspread_percent);
		dcn3_0_soc.dram_page_open_time_ns =
				fixed16_to_double_to_cpu(bb->dram_page_open_time_ns);
		dcn3_0_soc.dram_rw_turnaround_time_ns =
				fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns);
		dcn3_0_soc.dram_return_buffer_per_channel_bytes =
				le32_to_cpu(bb->dram_return_buffer_per_channel_bytes);
		dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles =
				le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles);
		dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes =
				le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes);
		dcn3_0_soc.channel_interleave_bytes =
				le32_to_cpu(bb->channel_interleave_bytes);
		dcn3_0_soc.num_banks =
				le32_to_cpu(bb->num_banks);
		dcn3_0_soc.num_chans =
				le32_to_cpu(bb->num_chans);
		dcn3_0_soc.gpuvm_min_page_size_bytes =
				le32_to_cpu(bb->vmm_page_size_bytes);
		dcn3_0_soc.dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us);
		dcn3_0_soc.writeback_dram_clock_change_latency_us =
				fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us);
		dcn3_0_soc.return_bus_width_bytes =
				le32_to_cpu(bb->return_bus_width_bytes);
		dcn3_0_soc.dispclk_dppclk_vco_speed_mhz =
				le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz);
		dcn3_0_soc.xfc_bus_transport_time_us =
				le32_to_cpu(bb->xfc_bus_transport_time_us);
		dcn3_0_soc.xfc_xbuf_latency_tolerance_us =
				le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us);
		dcn3_0_soc.use_urgent_burst_bw =
				le32_to_cpu(bb->use_urgent_burst_bw);
		dcn3_0_soc.num_states =
				le32_to_cpu(bb->num_states);

		for (i = 0; i < dcn3_0_soc.num_states; i++) {
			dcn3_0_soc.clock_limits[i].state =
					le32_to_cpu(bb->clock_limits[i].state);
			dcn3_0_soc.clock_limits[i].dcfclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz);
			dcn3_0_soc.clock_limits[i].fabricclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz);
			dcn3_0_soc.clock_limits[i].dispclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz);
			dcn3_0_soc.clock_limits[i].dppclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz);
			dcn3_0_soc.clock_limits[i].phyclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz);
			dcn3_0_soc.clock_limits[i].socclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz);
			dcn3_0_soc.clock_limits[i].dscclk_mhz =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz);
			dcn3_0_soc.clock_limits[i].dram_speed_mts =
					fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts);
		}
	}

	loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
	loaded_ip->max_num_dpp = pool->base.pipe_count;
	loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
	dcn20_patch_bounding_box(dc, loaded_bb);
	return true;
}

static bool dcn30_split_stream_for_mpc_or_odm(
		const struct dc *dc,
		struct resource_context *res_ctx,
		struct pipe_ctx *pri_pipe,
		struct pipe_ctx *sec_pipe,
		bool odm)
{
	int pipe_idx = sec_pipe->pipe_idx;
	const struct resource_pool *pool = dc->res_pool;

	*sec_pipe = *pri_pipe;

	sec_pipe->pipe_idx = pipe_idx;
	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
	sec_pipe->stream_res.dsc = NULL;
	if (odm) {
		if (pri_pipe->next_odm_pipe) {
			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
		}
		pri_pipe->next_odm_pipe = sec_pipe;
		sec_pipe->prev_odm_pipe = pri_pipe;
		ASSERT(sec_pipe->top_pipe == NULL);

		sec_pipe->stream_res.opp = pool->opps[pipe_idx];
		if (sec_pipe->stream->timing.flags.DSC == 1) {
			dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
			ASSERT(sec_pipe->stream_res.dsc);
			if (sec_pipe->stream_res.dsc == NULL)
				return false;
		}
	} else {
		if (pri_pipe->bottom_pipe) {
			ASSERT(pri_pipe->bottom_pipe != sec_pipe);
			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
			sec_pipe->bottom_pipe->top_pipe = sec_pipe;
		}
		pri_pipe->bottom_pipe = sec_pipe;
		sec_pipe->top_pipe = pri_pipe;

		ASSERT(pri_pipe->plane_state);
	}

	return true;
}

static bool dcn30_internal_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *vlevel_out,
		bool fast_validate)
{
	bool out = false;
	bool repopulate_pipes = false;
	int split[MAX_PIPES] = { 0 };
	bool merge[MAX_PIPES] = { false };
	bool newly_split[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	ASSERT(pipes);
	if (!pipes)
		return false;

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);

	if (!pipe_cnt) {
		out = true;
		goto validate_out;
	}

	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);

	if (!fast_validate) {
		/*
		 * DML favors voltage over p-state, but we're more interested in
		 * supporting p-state over voltage. We can't support p-state in
		 * prefetch mode > 0 so try capping the prefetch mode to start.
		 */
		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
			dm_allow_self_refresh_and_mclk_switch;
		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
		/* This may adjust vlevel and maxMpcComb */
		if (vlevel < context->bw_ctx.dml.soc.num_states)
			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
	}
	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
		/*
		 * If mode is unsupported or there's still no p-state support then
		 * fall back to favoring voltage.
		 *
		 * We don't actually support prefetch mode 2, so require that we
		 * at least support prefetch mode 1.
static bool dcn30_internal_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *vlevel_out,
		bool fast_validate)
{
	bool out = false;
	bool repopulate_pipes = false;
	int split[MAX_PIPES] = { 0 };
	bool merge[MAX_PIPES] = { false };
	bool newly_split[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	ASSERT(pipes);
	if (!pipes)
		return false;

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);

	if (!pipe_cnt) {
		out = true;
		goto validate_out;
	}

	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);

	if (!fast_validate) {
		/*
		 * DML favors voltage over p-state, but we're more interested in
		 * supporting p-state over voltage. We can't support p-state in
		 * prefetch mode > 0 so try capping the prefetch mode to start.
		 */
		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
			dm_allow_self_refresh_and_mclk_switch;
		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
		/* This may adjust vlevel and maxMpcComb */
		if (vlevel < context->bw_ctx.dml.soc.num_states)
			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
	}
	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
		/*
		 * If mode is unsupported or there's still no p-state support then
		 * fall back to favoring voltage.
		 *
		 * We don't actually support prefetch mode 2, so require that we
		 * at least support prefetch mode 1.
		 */
		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
			dm_allow_self_refresh;

		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
		if (vlevel < context->bw_ctx.dml.soc.num_states) {
			memset(split, 0, sizeof(split));
			memset(merge, 0, sizeof(merge));
			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
		}
	}

	dml_log_mode_support_params(&context->bw_ctx.dml);

	/* TODO: Need to check why the calculated vlevel fails validation for the resolutions below */
	if (context->res_ctx.pipe_ctx[0].stream != NULL) {
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 640 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 480)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 800)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 768)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1024)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 2048 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1536)
			vlevel = 0;
	}

	if (vlevel == context->bw_ctx.dml.soc.num_states)
		goto validate_fail;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;

		if (!pipe->stream)
			continue;

		/* We only support full screen mpo with ODM */
		if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
				&& pipe->plane_state && mpo_pipe
				&& memcmp(&mpo_pipe->plane_res.scl_data.recout,
						&pipe->plane_res.scl_data.recout,
						sizeof(struct rect)) != 0) {
			ASSERT(mpo_pipe->plane_state != pipe->plane_state);
			goto validate_fail;
		}
		pipe_idx++;
	}

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* skip pipes that don't need merging */
		if (!merge[i])
			continue;

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/* split off odm pipe */
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else
			ASSERT(0); /* Should never try to merge master pipe */

	}
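
	/*
	 * Apply the split flags computed above: acquire idle secondary pipes
	 * and split each flagged pipe 2:1 (and again for a 4:1 split) for MPC
	 * or ODM combine, marking newly created pipes so they are not split a
	 * second time.
	 */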
	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = NULL;
		bool odm;

		if (!pipe->stream || newly_split[i])
			continue;

		pipe_idx++;
		odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;

		if (!pipe->plane_state && !odm)
			continue;

		if (split[i]) {
			hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
			ASSERT(hsplit_pipe);
			if (!hsplit_pipe)
				goto validate_fail;

			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					pipe, hsplit_pipe, odm))
				goto validate_fail;

			newly_split[hsplit_pipe->pipe_idx] = true;
			repopulate_pipes = true;
		}
		if (split[i] == 4) {
			struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);

			ASSERT(pipe_4to1);
			if (!pipe_4to1)
				goto validate_fail;
			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					pipe, pipe_4to1, odm))
				goto validate_fail;
			newly_split[pipe_4to1->pipe_idx] = true;

			pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
			ASSERT(pipe_4to1);
			if (!pipe_4to1)
				goto validate_fail;
			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					hsplit_pipe, pipe_4to1, odm))
				goto validate_fail;
			newly_split[pipe_4to1->pipe_idx] = true;
		}
		if (odm)
			dcn20_build_mapped_resource(dc, context, pipe->stream);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!resource_build_scaling_params(pipe))
				goto validate_fail;
		}
	}

	/* Actual dsc count per stream dsc validation */
	if (!dcn20_validate_dsc(dc, context)) {
		vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
		goto validate_fail;
	}

	if (repopulate_pipes)
		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
	*vlevel_out = vlevel;
	*pipe_cnt_out = pipe_cnt;

	out = true;
	goto validate_out;

validate_fail:
	out = false;

validate_out:
	return out;
}
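
/*
 * Derive the four DCN watermark sets (A/B/C/D) for a validated context.
 * For each set, the DML SoC latencies are temporarily overridden with the
 * values from the matching clk_mgr watermark table entry (when valid) and
 * DML is queried for the individual watermarks. Set A is computed last so
 * that the DML state left behind matches Set A for the DLG calculations
 * done afterwards.
 */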
static void dcn30_calculate_wm(
		struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt,
		int vlevel)
{
	int i, pipe_idx;
	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];

	if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
		dcfclk = context->bw_ctx.dml.soc.min_dcfclk;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;

	/* Set B:
	 * DCFCLK: 1GHz or min required above 1GHz
	 * FCLK/UCLK: Max
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
		if (vlevel == 0) {
			pipes[0].clks_cfg.voltage = 1;
			pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
		}
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;

	/* Set C:
	 * DCFCLK: Min Required
	 * FCLK(proportional to UCLK): 1GHz or Max
	 * pstate latency overridden to 5us
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	/* Set D:
	 * DCFCLK: Min Required
	 * FCLK(proportional to UCLK): 1GHz or Max
	 * sr_enter_exit = 4, sr_exit = 2us
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	/* Set A:
	 * DCFCLK: Min Required
	 * FCLK(proportional to UCLK): 1GHz or Max
	 *
	 * Set A calculated last so that following calculations are based on Set A
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
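
	/*
	 * Propagate the DML-calculated DISPCLK/DPPCLK into each active pipe's
	 * clock config, overriding them with the clock_limits[0] values when
	 * forced clocks are enabled and raising them to any debug minimums.
	 */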
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);

		if (dc->config.forced_clocks) {
			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
		}
		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;

		pipe_idx++;
	}
}

bool dcn30_validate_bandwidth(struct dc *dc,
		struct dc_state *context,
		bool fast_validate)
{
	bool out = false;

	BW_VAL_TRACE_SETUP();

	int vlevel = 0;
	int pipe_cnt = 0;
	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
	DC_LOGGER_INIT(dc->ctx->logger);

	BW_VAL_TRACE_COUNT();

	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);

	if (pipe_cnt == 0)
		goto validate_out;

	if (!out)
		goto validate_fail;

	BW_VAL_TRACE_END_VOLTAGE_LEVEL();

	if (fast_validate) {
		BW_VAL_TRACE_SKIP(fast);
		goto validate_out;
	}

	dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);

	BW_VAL_TRACE_END_WATERMARKS();

	goto validate_out;

validate_fail:
	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));

	BW_VAL_TRACE_SKIP(fail);
	out = false;

validate_out:
	kfree(pipes);

	BW_VAL_TRACE_FINISH();

	return out;
}
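
/*
 * Estimate the DCFCLK (and optionally FCLK) needed to keep up with a given
 * DRAM speed: take the smaller of the DRAM- and SDP-limited bandwidths
 * (uclk_mts * num_chans * channel width * average-use percentage), then
 * divide by the effective return/fabric bus width at the SDP average-use
 * percentage. Illustrative numbers only (the real inputs come from
 * dcn3_0_soc): with uclk_mts = 1600, 8 channels, 2 bytes per channel and a
 * 60% average-use limit, bw_from_dram is about 1600 * 8 * 2 * 0.6 =
 * 15360 MB/s, and a 64-byte return bus then gives roughly
 * 15360 / (64 * 0.6) = 400 MHz of DCFCLK.
 */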
static void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
		unsigned int *optimal_dcfclk,
		unsigned int *optimal_fclk)
{
	double bw_from_dram, bw_from_dram1, bw_from_dram2;

	bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
	bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);

	bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;

	if (optimal_fclk)
		*optimal_fclk = bw_from_dram /
			(dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));

	if (optimal_dcfclk)
		*optimal_dcfclk = bw_from_dram /
			(dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
}

static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
	unsigned int i, j;
	unsigned int num_states = 0;

	unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
	unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};

	unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
	unsigned int num_dcfclk_sta_targets = 4;
	unsigned int num_uclk_states;

	if (dc->ctx->dc_bios->vram_info.num_chans)
		dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;

	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
		dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

	dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;

	if (bw_params->clk_table.entries[0].memclk_mhz) {

		if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
			dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
			num_dcfclk_sta_targets++;
		} else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
			for (i = 0; i < num_dcfclk_sta_targets; i++) {
				if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
					dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
					break;
				}
			}
			// Update size of array since we "removed" duplicates
			num_dcfclk_sta_targets = i + 1;
		}

		num_uclk_states = bw_params->clk_table.num_entries;

		// Calculate optimal dcfclk for each uclk
		for (i = 0; i < num_uclk_states; i++) {
			get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
					&optimal_dcfclk_for_uclk[i], NULL);
			if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
				optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
			}
		}

		// Calculate optimal uclk for each dcfclk sta target
		for (i = 0; i < num_dcfclk_sta_targets; i++) {
			for (j = 0; j < num_uclk_states; j++) {
				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
					optimal_uclk_for_dcfclk_sta_targets[i] =
						bw_params->clk_table.entries[j].memclk_mhz * 16;
					break;
				}
			}
		}
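
		/*
		 * Merge the two sorted lists - the fixed DCFCLK STA targets and
		 * the per-UCLK optimal DCFCLKs - in ascending DCFCLK order, so
		 * that each resulting state pairs a DCFCLK with a UCLK fast
		 * enough to feed it, capped at DC__VOLTAGE_STATES entries.
		 */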
		i = 0;
		j = 0;
		// create the final dcfclk and uclk table
		while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
			} else {
				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
					dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
					dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
				} else {
					j = num_uclk_states;
				}
			}
		}

		while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
			dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
			dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
		}

		while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
				optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
			dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
		}

		dcn3_0_soc.num_states = num_states;
		for (i = 0; i < dcn3_0_soc.num_states; i++) {
			dcn3_0_soc.clock_limits[i].state = i;
			dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
			dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
			dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];

			/* Fill all states with max values of all other clocks */
			dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
			dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
			dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
			dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
			/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[0] */
			/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
			dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
			dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
			dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
		}

		/* re-init DML with updated bb */
		dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
		if (dc->current_state)
			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
	}
}

static struct resource_funcs dcn30_res_pool_funcs = {
	.destroy = dcn30_destroy_resource_pool,
	.link_enc_create = dcn30_link_encoder_create,
	.panel_cntl_create = dcn30_panel_cntl_create,
	.validate_bandwidth = dcn30_validate_bandwidth,
	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
	.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
	.update_bw_bounding_box = dcn30_update_bw_bounding_box,
	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};
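
/*
 * Construct the DCN3.0 resource pool: hardware caps, clock sources, DCCG,
 * IRQ service, HUBBUB, per-pipe HUBPs/DPPs, OPPs, timing generators, ABMs,
 * MPC, DSCs, DWB/MMHUBBUB, AUX/I2C engines and the HW sequencer. Any
 * creation failure unwinds through dcn30_resource_destruct().
 */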
static bool dcn30_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn30_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap_dcn3;

	pool->base.funcs = &dcn30_res_pool_funcs;

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
	dc->caps.max_downscale_ratio = 600;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.extended_aux_timeout_support = true;
	dc->caps.dmcub_support = true;

	/* Color pipeline capabilities */
	dc->caps.color.dpp.dcn_arch = 1;
	dc->caps.color.dpp.input_lut_shared = 0;
	dc->caps.color.dpp.icsc = 1;
	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
	dc->caps.color.dpp.post_csc = 1;
	dc->caps.color.dpp.gamma_corr = 1;

	dc->caps.color.dpp.hw_3d_lut = 1;
	dc->caps.color.dpp.ogam_ram = 1;
	// no OGAM ROM on DCN3
	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
	dc->caps.color.dpp.ocsc = 0;

	dc->caps.color.mpc.gamut_remap = 1;
	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3
	dc->caps.color.mpc.ogam_ram = 1;
	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
	dc->caps.color.mpc.ocsc = 1;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;
	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
		dc->debug = debug_defaults_diags;
	} else
		dc->debug = debug_defaults_diags;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	/* Clock Sources for Pixel Clock */
	pool->base.clock_sources[DCN30_CLK_SRC_PLL0] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL1] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL2] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL3] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL4] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL5] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL5,
				&clk_src_regs[5], false);

	pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;

	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	/* DCCG */
	pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* PP Lib and SMU interfaces */
	init_soc_bounding_box(dc, pool);

	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);

	/* IRQ */
	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn30_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* HUBBUB */
	pool->base.hubbub = dcn30_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

	/* HUBPs, DPPs, OPPs and TGs */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn30_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create hubps!\n");
			goto create_fail;
		}

		pool->base.dpps[i] = dcn30_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create dpps!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn30_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] = dcn30_timing_generator_create(ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}
	pool->base.timing_generator_count = i;

	/* ABM */
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.multiple_abms[i] = dmub_abm_create(ctx,
				&abm_regs[i],
				&abm_shift,
				&abm_mask);
		if (pool->base.multiple_abms[i] == NULL) {
			dm_error("DC: failed to create abm for pipe %d!\n", i);
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	/* MPC and DSC */
	pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn30_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}

	/* DWB and MMHUBBUB */
	if (!dcn30_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}

	if (!dcn30_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	/* AUX and I2C */
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn30_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create aux engine!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create hw i2c!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	/* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto create_fail;

	/* HW Sequencer and Plane caps */
	dcn30_hw_sequencer_construct(dc);

	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	return true;

create_fail:

	dcn30_resource_destruct(pool);

	return false;
}

struct resource_pool *dcn30_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn30_resource_pool *pool =
		kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	kfree(pool);
	return NULL;
}