1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 27 #include "dm_services.h" 28 #include "dc.h" 29 30 #include "dcn30_init.h" 31 32 #include "resource.h" 33 #include "include/irq_service_interface.h" 34 #include "dcn20/dcn20_resource.h" 35 36 #include "dcn30_resource.h" 37 38 #include "dcn10/dcn10_ipp.h" 39 #include "dcn30/dcn30_hubbub.h" 40 #include "dcn30/dcn30_mpc.h" 41 #include "dcn30/dcn30_hubp.h" 42 #include "irq/dcn30/irq_service_dcn30.h" 43 #include "dcn30/dcn30_dpp.h" 44 #include "dcn30/dcn30_optc.h" 45 #include "dcn20/dcn20_hwseq.h" 46 #include "dcn30/dcn30_hwseq.h" 47 #include "dce110/dce110_hw_sequencer.h" 48 #include "dcn30/dcn30_opp.h" 49 #include "dcn20/dcn20_dsc.h" 50 #include "dcn30/dcn30_vpg.h" 51 #include "dcn30/dcn30_afmt.h" 52 #include "dcn30/dcn30_dio_stream_encoder.h" 53 #include "dcn30/dcn30_dio_link_encoder.h" 54 #include "dce/dce_clock_source.h" 55 #include "dce/dce_audio.h" 56 #include "dce/dce_hwseq.h" 57 #include "clk_mgr.h" 58 #include "virtual/virtual_stream_encoder.h" 59 #include "dce110/dce110_resource.h" 60 #include "dml/display_mode_vba.h" 61 #include "dcn30/dcn30_dccg.h" 62 #include "dcn10/dcn10_resource.h" 63 #include "dce/dce_panel_cntl.h" 64 65 #include "dcn30/dcn30_dwb.h" 66 #include "dcn30/dcn30_mmhubbub.h" 67 68 #include "sienna_cichlid_ip_offset.h" 69 #include "dcn/dcn_3_0_0_offset.h" 70 #include "dcn/dcn_3_0_0_sh_mask.h" 71 72 #include "nbio/nbio_7_4_offset.h" 73 74 #include "dcn/dpcs_3_0_0_offset.h" 75 #include "dcn/dpcs_3_0_0_sh_mask.h" 76 77 #include "mmhub/mmhub_2_0_0_offset.h" 78 #include "mmhub/mmhub_2_0_0_sh_mask.h" 79 80 #include "reg_helper.h" 81 #include "dce/dmub_abm.h" 82 #include "dce/dce_aux.h" 83 #include "dce/dce_i2c.h" 84 85 #include "dml/dcn30/display_mode_vba_30.h" 86 #include "vm_helper.h" 87 #include "dcn20/dcn20_vmid.h" 88 #include "amdgpu_socbb.h" 89 90 #define DC_LOGGER_INIT(logger) 91 92 struct _vcs_dpi_ip_params_st dcn3_0_ip = { 93 .use_min_dcfclk = 1, 94 .clamp_min_dcfclk = 0, 95 .odm_capable = 1, 96 .gpuvm_enable = 0, 97 .hostvm_enable = 0, 98 .gpuvm_max_page_table_levels = 4, 99 .hostvm_max_page_table_levels = 4, 100 .hostvm_cached_page_table_levels = 0, 101 .pte_group_size_bytes = 2048, 102 .num_dsc = 6, 103 .rob_buffer_size_kbytes = 184, 104 .det_buffer_size_kbytes = 184, 105 .dpte_buffer_size_in_pte_reqs_luma = 84, 106 .pde_proc_buffer_size_64k_reqs = 48, 107 .dpp_output_buffer_pixels 
= 2560, 108 .opp_output_buffer_lines = 1, 109 .pixel_chunk_size_kbytes = 8, 110 .pte_enable = 1, 111 .max_page_table_levels = 2, 112 .pte_chunk_size_kbytes = 2, // ? 113 .meta_chunk_size_kbytes = 2, 114 .writeback_chunk_size_kbytes = 8, 115 .line_buffer_size_bits = 789504, 116 .is_line_buffer_bpp_fixed = 0, // ? 117 .line_buffer_fixed_bpp = 0, // ? 118 .dcc_supported = true, 119 .writeback_interface_buffer_size_kbytes = 90, 120 .writeback_line_buffer_buffer_size = 0, 121 .max_line_buffer_lines = 12, 122 .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 123 .writeback_chroma_buffer_size_kbytes = 8, 124 .writeback_chroma_line_buffer_width_pixels = 4, 125 .writeback_max_hscl_ratio = 1, 126 .writeback_max_vscl_ratio = 1, 127 .writeback_min_hscl_ratio = 1, 128 .writeback_min_vscl_ratio = 1, 129 .writeback_max_hscl_taps = 1, 130 .writeback_max_vscl_taps = 1, 131 .writeback_line_buffer_luma_buffer_size = 0, 132 .writeback_line_buffer_chroma_buffer_size = 14643, 133 .cursor_buffer_size = 8, 134 .cursor_chunk_size = 2, 135 .max_num_otg = 6, 136 .max_num_dpp = 6, 137 .max_num_wb = 1, 138 .max_dchub_pscl_bw_pix_per_clk = 4, 139 .max_pscl_lb_bw_pix_per_clk = 2, 140 .max_lb_vscl_bw_pix_per_clk = 4, 141 .max_vscl_hscl_bw_pix_per_clk = 4, 142 .max_hscl_ratio = 6, 143 .max_vscl_ratio = 6, 144 .hscl_mults = 4, 145 .vscl_mults = 4, 146 .max_hscl_taps = 8, 147 .max_vscl_taps = 8, 148 .dispclk_ramp_margin_percent = 1, 149 .underscan_factor = 1.11, 150 .min_vblank_lines = 32, 151 .dppclk_delay_subtotal = 46, 152 .dynamic_metadata_vm_enabled = true, 153 .dppclk_delay_scl_lb_only = 16, 154 .dppclk_delay_scl = 50, 155 .dppclk_delay_cnvc_formatter = 27, 156 .dppclk_delay_cnvc_cursor = 6, 157 .dispclk_delay_subtotal = 119, 158 .dcfclk_cstate_latency = 5.2, // SRExitTime 159 .max_inter_dcn_tile_repeaters = 8, 160 .odm_combine_4to1_supported = true, 161 162 .xfc_supported = false, 163 .xfc_fill_bw_overhead_percent = 10.0, 164 .xfc_fill_constant_bytes = 0, 165 .gfx7_compat_tiling_supported = 0, 166 .number_of_cursors = 1, 167 }; 168 169 struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = { 170 .clock_limits = { 171 { 172 .state = 0, 173 .dispclk_mhz = 562.0, 174 .dppclk_mhz = 300.0, 175 .phyclk_mhz = 300.0, 176 .phyclk_d18_mhz = 667.0, 177 .dscclk_mhz = 405.6, 178 }, 179 }, 180 .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ 181 .num_states = 1, 182 .sr_exit_time_us = 12, 183 .sr_enter_plus_exit_time_us = 20, 184 .urgent_latency_us = 4.0, 185 .urgent_latency_pixel_data_only_us = 4.0, 186 .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, 187 .urgent_latency_vm_data_only_us = 4.0, 188 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, 189 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, 190 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, 191 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, 192 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, 193 .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, 194 .max_avg_sdp_bw_use_normal_percent = 60.0, 195 .max_avg_dram_bw_use_normal_percent = 40.0, 196 .writeback_latency_us = 12.0, 197 .max_request_size_bytes = 256, 198 .fabric_datapath_to_dcn_data_return_bytes = 64, 199 .dcn_downspread_percent = 0.5, 200 .downspread_percent = 0.38, 201 .dram_page_open_time_ns = 50.0, 202 .dram_rw_turnaround_time_ns = 17.5, 203 .dram_return_buffer_per_channel_bytes = 8192, 204 .round_trip_ping_latency_dcfclk_cycles = 191, 205 .urgent_out_of_order_return_per_channel_bytes = 4096, 206 
.channel_interleave_bytes = 256, 207 .num_banks = 8, 208 .gpuvm_min_page_size_bytes = 4096, 209 .hostvm_min_page_size_bytes = 4096, 210 .dram_clock_change_latency_us = 404, 211 .dummy_pstate_latency_us = 5, 212 .writeback_dram_clock_change_latency_us = 23.0, 213 .return_bus_width_bytes = 64, 214 .dispclk_dppclk_vco_speed_mhz = 3650, 215 .xfc_bus_transport_time_us = 20, // ? 216 .xfc_xbuf_latency_tolerance_us = 4, // ? 217 .use_urgent_burst_bw = 1, // ? 218 .do_urgent_latency_adjustment = true, 219 .urgent_latency_adjustment_fabric_clock_component_us = 1.0, 220 .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, 221 }; 222 223 enum dcn30_clk_src_array_id { 224 DCN30_CLK_SRC_PLL0, 225 DCN30_CLK_SRC_PLL1, 226 DCN30_CLK_SRC_PLL2, 227 DCN30_CLK_SRC_PLL3, 228 DCN30_CLK_SRC_PLL4, 229 DCN30_CLK_SRC_PLL5, 230 DCN30_CLK_SRC_TOTAL 231 }; 232 233 /* begin ********************* 234 * macros to expend register list macro defined in HW object header file 235 */ 236 237 /* DCN */ 238 /* TODO awful hack. fixup dcn20_dwb.h */ 239 #undef BASE_INNER 240 #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg 241 242 #define BASE(seg) BASE_INNER(seg) 243 244 #define SR(reg_name)\ 245 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ 246 mm ## reg_name 247 248 #define SRI(reg_name, block, id)\ 249 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 250 mm ## block ## id ## _ ## reg_name 251 252 #define SRI2(reg_name, block, id)\ 253 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ 254 mm ## reg_name 255 256 #define SRIR(var_name, reg_name, block, id)\ 257 .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 258 mm ## block ## id ## _ ## reg_name 259 260 #define SRII(reg_name, block, id)\ 261 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 262 mm ## block ## id ## _ ## reg_name 263 264 #define SRII_MPC_RMU(reg_name, block, id)\ 265 .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 266 mm ## block ## id ## _ ## reg_name 267 268 #define SRII_DWB(reg_name, temp_name, block, id)\ 269 .reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \ 270 mm ## block ## id ## _ ## temp_name 271 272 #define DCCG_SRII(reg_name, block, id)\ 273 .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 274 mm ## block ## id ## _ ## reg_name 275 276 #define VUPDATE_SRII(reg_name, block, id)\ 277 .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ 278 mm ## reg_name ## _ ## block ## id 279 280 /* NBIO */ 281 #define NBIO_BASE_INNER(seg) \ 282 NBIO_BASE__INST0_SEG ## seg 283 284 #define NBIO_BASE(seg) \ 285 NBIO_BASE_INNER(seg) 286 287 #define NBIO_SR(reg_name)\ 288 .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ 289 mm ## reg_name 290 291 /* MMHUB */ 292 #define MMHUB_BASE_INNER(seg) \ 293 MMHUB_BASE__INST0_SEG ## seg 294 295 #define MMHUB_BASE(seg) \ 296 MMHUB_BASE_INNER(seg) 297 298 #define MMHUB_SR(reg_name)\ 299 .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ 300 mmMM ## reg_name 301 302 /* CLOCK */ 303 #define CLK_BASE_INNER(seg) \ 304 CLK_BASE__INST0_SEG ## seg 305 306 #define CLK_BASE(seg) \ 307 CLK_BASE_INNER(seg) 308 309 #define CLK_SRI(reg_name, block, inst)\ 310 .reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \ 311 mm ## block ## _ ## inst ## _ ## reg_name 312 313 314 static const struct bios_registers bios_regs = { 315 NBIO_SR(BIOS_SCRATCH_3), 316 NBIO_SR(BIOS_SCRATCH_6) 317 }; 318 319 #define 
clk_src_regs(index, pllid)\ 320 [index] = {\ 321 CS_COMMON_REG_LIST_DCN2_0(index, pllid),\ 322 } 323 324 static const struct dce110_clk_src_regs clk_src_regs[] = { 325 clk_src_regs(0, A), 326 clk_src_regs(1, B), 327 clk_src_regs(2, C), 328 clk_src_regs(3, D), 329 clk_src_regs(4, E), 330 clk_src_regs(5, F) 331 }; 332 333 static const struct dce110_clk_src_shift cs_shift = { 334 CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) 335 }; 336 337 static const struct dce110_clk_src_mask cs_mask = { 338 CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) 339 }; 340 341 #define abm_regs(id)\ 342 [id] = {\ 343 ABM_DCN301_REG_LIST(id)\ 344 } 345 346 static const struct dce_abm_registers abm_regs[] = { 347 abm_regs(0), 348 abm_regs(1), 349 abm_regs(2), 350 abm_regs(3), 351 abm_regs(4), 352 abm_regs(5), 353 }; 354 355 static const struct dce_abm_shift abm_shift = { 356 ABM_MASK_SH_LIST_DCN301(__SHIFT) 357 }; 358 359 static const struct dce_abm_mask abm_mask = { 360 ABM_MASK_SH_LIST_DCN301(_MASK) 361 }; 362 363 364 365 #define audio_regs(id)\ 366 [id] = {\ 367 AUD_COMMON_REG_LIST(id)\ 368 } 369 370 static const struct dce_audio_registers audio_regs[] = { 371 audio_regs(0), 372 audio_regs(1), 373 audio_regs(2), 374 audio_regs(3), 375 audio_regs(4), 376 audio_regs(5), 377 audio_regs(6) 378 }; 379 380 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ 381 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ 382 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ 383 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) 384 385 static const struct dce_audio_shift audio_shift = { 386 DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) 387 }; 388 389 static const struct dce_audio_mask audio_mask = { 390 DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) 391 }; 392 393 #define vpg_regs(id)\ 394 [id] = {\ 395 VPG_DCN3_REG_LIST(id)\ 396 } 397 398 static const struct dcn30_vpg_registers vpg_regs[] = { 399 vpg_regs(0), 400 vpg_regs(1), 401 vpg_regs(2), 402 vpg_regs(3), 403 vpg_regs(4), 404 vpg_regs(5), 405 vpg_regs(6), 406 }; 407 408 static const struct dcn30_vpg_shift vpg_shift = { 409 DCN3_VPG_MASK_SH_LIST(__SHIFT) 410 }; 411 412 static const struct dcn30_vpg_mask vpg_mask = { 413 DCN3_VPG_MASK_SH_LIST(_MASK) 414 }; 415 416 #define afmt_regs(id)\ 417 [id] = {\ 418 AFMT_DCN3_REG_LIST(id)\ 419 } 420 421 static const struct dcn30_afmt_registers afmt_regs[] = { 422 afmt_regs(0), 423 afmt_regs(1), 424 afmt_regs(2), 425 afmt_regs(3), 426 afmt_regs(4), 427 afmt_regs(5), 428 afmt_regs(6), 429 }; 430 431 static const struct dcn30_afmt_shift afmt_shift = { 432 DCN3_AFMT_MASK_SH_LIST(__SHIFT) 433 }; 434 435 static const struct dcn30_afmt_mask afmt_mask = { 436 DCN3_AFMT_MASK_SH_LIST(_MASK) 437 }; 438 439 #define stream_enc_regs(id)\ 440 [id] = {\ 441 SE_DCN3_REG_LIST(id)\ 442 } 443 444 static const struct dcn10_stream_enc_registers stream_enc_regs[] = { 445 stream_enc_regs(0), 446 stream_enc_regs(1), 447 stream_enc_regs(2), 448 stream_enc_regs(3), 449 stream_enc_regs(4), 450 stream_enc_regs(5) 451 }; 452 453 static const struct dcn10_stream_encoder_shift se_shift = { 454 SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) 455 }; 456 457 static const struct dcn10_stream_encoder_mask se_mask = { 458 SE_COMMON_MASK_SH_LIST_DCN30(_MASK) 459 }; 460 461 462 #define aux_regs(id)\ 463 [id] = {\ 464 DCN2_AUX_REG_LIST(id)\ 465 } 466 467 static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { 468 aux_regs(0), 469 aux_regs(1), 470 aux_regs(2), 471 aux_regs(3), 472 aux_regs(4), 473 aux_regs(5) 474 }; 475 476 #define hpd_regs(id)\ 477 
[id] = {\ 478 HPD_REG_LIST(id)\ 479 } 480 481 static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { 482 hpd_regs(0), 483 hpd_regs(1), 484 hpd_regs(2), 485 hpd_regs(3), 486 hpd_regs(4), 487 hpd_regs(5) 488 }; 489 490 #define link_regs(id, phyid)\ 491 [id] = {\ 492 LE_DCN3_REG_LIST(id), \ 493 UNIPHY_DCN2_REG_LIST(phyid), \ 494 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ 495 } 496 497 static const struct dce110_aux_registers_shift aux_shift = { 498 DCN_AUX_MASK_SH_LIST(__SHIFT) 499 }; 500 501 static const struct dce110_aux_registers_mask aux_mask = { 502 DCN_AUX_MASK_SH_LIST(_MASK) 503 }; 504 505 static const struct dcn10_link_enc_registers link_enc_regs[] = { 506 link_regs(0, A), 507 link_regs(1, B), 508 link_regs(2, C), 509 link_regs(3, D), 510 link_regs(4, E), 511 link_regs(5, F) 512 }; 513 514 static const struct dcn10_link_enc_shift le_shift = { 515 LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT),\ 516 DPCS_DCN2_MASK_SH_LIST(__SHIFT) 517 }; 518 519 static const struct dcn10_link_enc_mask le_mask = { 520 LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK),\ 521 DPCS_DCN2_MASK_SH_LIST(_MASK) 522 }; 523 524 525 static const struct dce_panel_cntl_registers panel_cntl_regs[] = { 526 { DCN_PANEL_CNTL_REG_LIST() } 527 }; 528 529 static const struct dce_panel_cntl_shift panel_cntl_shift = { 530 DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) 531 }; 532 533 static const struct dce_panel_cntl_mask panel_cntl_mask = { 534 DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) 535 }; 536 537 #define dpp_regs(id)\ 538 [id] = {\ 539 DPP_REG_LIST_DCN30(id),\ 540 } 541 542 static const struct dcn3_dpp_registers dpp_regs[] = { 543 dpp_regs(0), 544 dpp_regs(1), 545 dpp_regs(2), 546 dpp_regs(3), 547 dpp_regs(4), 548 dpp_regs(5), 549 }; 550 551 static const struct dcn3_dpp_shift tf_shift = { 552 DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) 553 }; 554 555 static const struct dcn3_dpp_mask tf_mask = { 556 DPP_REG_LIST_SH_MASK_DCN30(_MASK) 557 }; 558 559 #define opp_regs(id)\ 560 [id] = {\ 561 OPP_REG_LIST_DCN30(id),\ 562 } 563 564 static const struct dcn20_opp_registers opp_regs[] = { 565 opp_regs(0), 566 opp_regs(1), 567 opp_regs(2), 568 opp_regs(3), 569 opp_regs(4), 570 opp_regs(5) 571 }; 572 573 static const struct dcn20_opp_shift opp_shift = { 574 OPP_MASK_SH_LIST_DCN20(__SHIFT) 575 }; 576 577 static const struct dcn20_opp_mask opp_mask = { 578 OPP_MASK_SH_LIST_DCN20(_MASK) 579 }; 580 581 #define aux_engine_regs(id)\ 582 [id] = {\ 583 AUX_COMMON_REG_LIST0(id), \ 584 .AUXN_IMPCAL = 0, \ 585 .AUXP_IMPCAL = 0, \ 586 .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ 587 } 588 589 static const struct dce110_aux_registers aux_engine_regs[] = { 590 aux_engine_regs(0), 591 aux_engine_regs(1), 592 aux_engine_regs(2), 593 aux_engine_regs(3), 594 aux_engine_regs(4), 595 aux_engine_regs(5) 596 }; 597 598 #define dwbc_regs_dcn3(id)\ 599 [id] = {\ 600 DWBC_COMMON_REG_LIST_DCN30(id),\ 601 } 602 603 static const struct dcn30_dwbc_registers dwbc30_regs[] = { 604 dwbc_regs_dcn3(0), 605 }; 606 607 static const struct dcn30_dwbc_shift dwbc30_shift = { 608 DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) 609 }; 610 611 static const struct dcn30_dwbc_mask dwbc30_mask = { 612 DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) 613 }; 614 615 #define mcif_wb_regs_dcn3(id)\ 616 [id] = {\ 617 MCIF_WB_COMMON_REG_LIST_DCN30(id),\ 618 } 619 620 static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { 621 mcif_wb_regs_dcn3(0) 622 }; 623 624 static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { 625 MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) 626 }; 627 628 static const struct 
dcn30_mmhubbub_mask mcif_wb30_mask = { 629 MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) 630 }; 631 632 #define dsc_regsDCN20(id)\ 633 [id] = {\ 634 DSC_REG_LIST_DCN20(id)\ 635 } 636 637 static const struct dcn20_dsc_registers dsc_regs[] = { 638 dsc_regsDCN20(0), 639 dsc_regsDCN20(1), 640 dsc_regsDCN20(2), 641 dsc_regsDCN20(3), 642 dsc_regsDCN20(4), 643 dsc_regsDCN20(5) 644 }; 645 646 static const struct dcn20_dsc_shift dsc_shift = { 647 DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) 648 }; 649 650 static const struct dcn20_dsc_mask dsc_mask = { 651 DSC_REG_LIST_SH_MASK_DCN20(_MASK) 652 }; 653 654 static const struct dcn30_mpc_registers mpc_regs = { 655 MPC_REG_LIST_DCN3_0(0), 656 MPC_REG_LIST_DCN3_0(1), 657 MPC_REG_LIST_DCN3_0(2), 658 MPC_REG_LIST_DCN3_0(3), 659 MPC_REG_LIST_DCN3_0(4), 660 MPC_REG_LIST_DCN3_0(5), 661 MPC_OUT_MUX_REG_LIST_DCN3_0(0), 662 MPC_OUT_MUX_REG_LIST_DCN3_0(1), 663 MPC_OUT_MUX_REG_LIST_DCN3_0(2), 664 MPC_OUT_MUX_REG_LIST_DCN3_0(3), 665 MPC_OUT_MUX_REG_LIST_DCN3_0(4), 666 MPC_OUT_MUX_REG_LIST_DCN3_0(5), 667 MPC_RMU_GLOBAL_REG_LIST_DCN3AG, 668 MPC_RMU_REG_LIST_DCN3AG(0), 669 MPC_RMU_REG_LIST_DCN3AG(1), 670 MPC_RMU_REG_LIST_DCN3AG(2), 671 MPC_DWB_MUX_REG_LIST_DCN3_0(0), 672 }; 673 674 static const struct dcn30_mpc_shift mpc_shift = { 675 MPC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) 676 }; 677 678 static const struct dcn30_mpc_mask mpc_mask = { 679 MPC_COMMON_MASK_SH_LIST_DCN30(_MASK) 680 }; 681 682 #define optc_regs(id)\ 683 [id] = {OPTC_COMMON_REG_LIST_DCN3_0(id)} 684 685 686 static const struct dcn_optc_registers optc_regs[] = { 687 optc_regs(0), 688 optc_regs(1), 689 optc_regs(2), 690 optc_regs(3), 691 optc_regs(4), 692 optc_regs(5) 693 }; 694 695 static const struct dcn_optc_shift optc_shift = { 696 OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) 697 }; 698 699 static const struct dcn_optc_mask optc_mask = { 700 OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) 701 }; 702 703 #define hubp_regs(id)\ 704 [id] = {\ 705 HUBP_REG_LIST_DCN30(id)\ 706 } 707 708 static const struct dcn_hubp2_registers hubp_regs[] = { 709 hubp_regs(0), 710 hubp_regs(1), 711 hubp_regs(2), 712 hubp_regs(3), 713 hubp_regs(4), 714 hubp_regs(5) 715 }; 716 717 static const struct dcn_hubp2_shift hubp_shift = { 718 HUBP_MASK_SH_LIST_DCN30(__SHIFT) 719 }; 720 721 static const struct dcn_hubp2_mask hubp_mask = { 722 HUBP_MASK_SH_LIST_DCN30(_MASK) 723 }; 724 725 static const struct dcn_hubbub_registers hubbub_reg = { 726 HUBBUB_REG_LIST_DCN30(0) 727 }; 728 729 static const struct dcn_hubbub_shift hubbub_shift = { 730 HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) 731 }; 732 733 static const struct dcn_hubbub_mask hubbub_mask = { 734 HUBBUB_MASK_SH_LIST_DCN30(_MASK) 735 }; 736 737 static const struct dccg_registers dccg_regs = { 738 DCCG_REG_LIST_DCN30() 739 }; 740 741 static const struct dccg_shift dccg_shift = { 742 DCCG_MASK_SH_LIST_DCN3(__SHIFT) 743 }; 744 745 static const struct dccg_mask dccg_mask = { 746 DCCG_MASK_SH_LIST_DCN3(_MASK) 747 }; 748 749 static const struct dce_hwseq_registers hwseq_reg = { 750 HWSEQ_DCN30_REG_LIST() 751 }; 752 753 static const struct dce_hwseq_shift hwseq_shift = { 754 HWSEQ_DCN30_MASK_SH_LIST(__SHIFT) 755 }; 756 757 static const struct dce_hwseq_mask hwseq_mask = { 758 HWSEQ_DCN30_MASK_SH_LIST(_MASK) 759 }; 760 #define vmid_regs(id)\ 761 [id] = {\ 762 DCN20_VMID_REG_LIST(id)\ 763 } 764 765 static const struct dcn_vmid_registers vmid_regs[] = { 766 vmid_regs(0), 767 vmid_regs(1), 768 vmid_regs(2), 769 vmid_regs(3), 770 vmid_regs(4), 771 vmid_regs(5), 772 vmid_regs(6), 773 vmid_regs(7), 774 vmid_regs(8), 775 
vmid_regs(9), 776 vmid_regs(10), 777 vmid_regs(11), 778 vmid_regs(12), 779 vmid_regs(13), 780 vmid_regs(14), 781 vmid_regs(15) 782 }; 783 784 static const struct dcn20_vmid_shift vmid_shifts = { 785 DCN20_VMID_MASK_SH_LIST(__SHIFT) 786 }; 787 788 static const struct dcn20_vmid_mask vmid_masks = { 789 DCN20_VMID_MASK_SH_LIST(_MASK) 790 }; 791 792 static const struct resource_caps res_cap_dcn3 = { 793 .num_timing_generator = 6, 794 .num_opp = 6, 795 .num_video_plane = 6, 796 .num_audio = 6, 797 .num_stream_encoder = 6, 798 .num_pll = 6, 799 .num_dwb = 1, 800 .num_ddc = 6, 801 .num_vmid = 16, 802 .num_mpc_3dlut = 3, 803 .num_dsc = 6, 804 }; 805 806 static const struct dc_plane_cap plane_cap = { 807 .type = DC_PLANE_TYPE_DCN_UNIVERSAL, 808 .blends_with_above = true, 809 .blends_with_below = true, 810 .per_pixel_alpha = true, 811 812 .pixel_format_support = { 813 .argb8888 = true, 814 .nv12 = true, 815 .fp16 = true, 816 .p010 = false, 817 .ayuv = false, 818 }, 819 820 .max_upscale_factor = { 821 .argb8888 = 16000, 822 .nv12 = 16000, 823 .fp16 = 16000 824 }, 825 826 .max_downscale_factor = { 827 .argb8888 = 600, 828 .nv12 = 600, 829 .fp16 = 600 830 } 831 }; 832 833 static const struct dc_debug_options debug_defaults_drv = { 834 .disable_dmcu = true, 835 .force_abm_enable = false, 836 .timing_trace = false, 837 .clock_trace = true, 838 .disable_pplib_clock_request = true, 839 .pipe_split_policy = MPC_SPLIT_DYNAMIC, 840 .force_single_disp_pipe_split = false, 841 .disable_dcc = DCC_ENABLE, 842 .vsr_support = true, 843 .performance_trace = false, 844 .max_downscale_src_width = 7680,/*upto 8K*/ 845 .disable_pplib_wm_range = false, 846 .scl_reset_length10 = true, 847 .sanity_checks = false, 848 .underflow_assert_delay_us = 0xFFFFFFFF, 849 .dwb_fi_phase = -1, // -1 = disable, 850 .dmub_command_table = true, 851 }; 852 853 static const struct dc_debug_options debug_defaults_diags = { 854 .disable_dmcu = true, 855 .force_abm_enable = false, 856 .timing_trace = true, 857 .clock_trace = true, 858 .disable_dpp_power_gate = true, 859 .disable_hubp_power_gate = true, 860 .disable_clock_gate = true, 861 .disable_pplib_clock_request = true, 862 .disable_pplib_wm_range = true, 863 .disable_stutter = false, 864 .scl_reset_length10 = true, 865 .dwb_fi_phase = -1, // -1 = disable 866 .dmub_command_table = true, 867 }; 868 869 void dcn30_dpp_destroy(struct dpp **dpp) 870 { 871 kfree(TO_DCN20_DPP(*dpp)); 872 *dpp = NULL; 873 } 874 875 struct dpp *dcn30_dpp_create( 876 struct dc_context *ctx, 877 uint32_t inst) 878 { 879 struct dcn3_dpp *dpp = 880 kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); 881 882 if (!dpp) 883 return NULL; 884 885 if (dpp3_construct(dpp, ctx, inst, 886 &dpp_regs[inst], &tf_shift, &tf_mask)) 887 return &dpp->base; 888 889 BREAK_TO_DEBUGGER(); 890 kfree(dpp); 891 return NULL; 892 } 893 struct output_pixel_processor *dcn30_opp_create( 894 struct dc_context *ctx, uint32_t inst) 895 { 896 struct dcn20_opp *opp = 897 kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); 898 899 if (!opp) { 900 BREAK_TO_DEBUGGER(); 901 return NULL; 902 } 903 904 dcn20_opp_construct(opp, ctx, inst, 905 &opp_regs[inst], &opp_shift, &opp_mask); 906 return &opp->base; 907 } 908 909 struct dce_aux *dcn30_aux_engine_create( 910 struct dc_context *ctx, 911 uint32_t inst) 912 { 913 struct aux_engine_dce110 *aux_engine = 914 kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); 915 916 if (!aux_engine) 917 return NULL; 918 919 dce110_aux_engine_construct(aux_engine, ctx, inst, 920 SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, 
921 &aux_engine_regs[inst], 922 &aux_mask, 923 &aux_shift, 924 ctx->dc->caps.extended_aux_timeout_support); 925 926 return &aux_engine->base; 927 } 928 #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } 929 930 static const struct dce_i2c_registers i2c_hw_regs[] = { 931 i2c_inst_regs(1), 932 i2c_inst_regs(2), 933 i2c_inst_regs(3), 934 i2c_inst_regs(4), 935 i2c_inst_regs(5), 936 i2c_inst_regs(6), 937 }; 938 939 static const struct dce_i2c_shift i2c_shifts = { 940 I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) 941 }; 942 943 static const struct dce_i2c_mask i2c_masks = { 944 I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) 945 }; 946 947 struct dce_i2c_hw *dcn30_i2c_hw_create( 948 struct dc_context *ctx, 949 uint32_t inst) 950 { 951 struct dce_i2c_hw *dce_i2c_hw = 952 kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); 953 954 if (!dce_i2c_hw) 955 return NULL; 956 957 dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, 958 &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); 959 960 return dce_i2c_hw; 961 } 962 static struct mpc *dcn30_mpc_create( 963 struct dc_context *ctx, 964 int num_mpcc, 965 int num_rmu) 966 { 967 struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), 968 GFP_KERNEL); 969 970 if (!mpc30) 971 return NULL; 972 973 dcn30_mpc_construct(mpc30, ctx, 974 &mpc_regs, 975 &mpc_shift, 976 &mpc_mask, 977 num_mpcc, 978 num_rmu); 979 980 return &mpc30->base; 981 } 982 983 struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) 984 { 985 int i; 986 987 struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), 988 GFP_KERNEL); 989 990 if (!hubbub3) 991 return NULL; 992 993 hubbub3_construct(hubbub3, ctx, 994 &hubbub_reg, 995 &hubbub_shift, 996 &hubbub_mask); 997 998 999 for (i = 0; i < res_cap_dcn3.num_vmid; i++) { 1000 struct dcn20_vmid *vmid = &hubbub3->vmid[i]; 1001 1002 vmid->ctx = ctx; 1003 1004 vmid->regs = &vmid_regs[i]; 1005 vmid->shifts = &vmid_shifts; 1006 vmid->masks = &vmid_masks; 1007 } 1008 1009 return &hubbub3->base; 1010 } 1011 1012 struct timing_generator *dcn30_timing_generator_create( 1013 struct dc_context *ctx, 1014 uint32_t instance) 1015 { 1016 struct optc *tgn10 = 1017 kzalloc(sizeof(struct optc), GFP_KERNEL); 1018 1019 if (!tgn10) 1020 return NULL; 1021 1022 tgn10->base.inst = instance; 1023 tgn10->base.ctx = ctx; 1024 1025 tgn10->tg_regs = &optc_regs[instance]; 1026 tgn10->tg_shift = &optc_shift; 1027 tgn10->tg_mask = &optc_mask; 1028 1029 dcn30_timing_generator_init(tgn10); 1030 1031 return &tgn10->base; 1032 } 1033 1034 static const struct encoder_feature_support link_enc_feature = { 1035 .max_hdmi_deep_color = COLOR_DEPTH_121212, 1036 .max_hdmi_pixel_clock = 600000, 1037 .hdmi_ycbcr420_supported = true, 1038 .dp_ycbcr420_supported = true, 1039 .fec_supported = true, 1040 .flags.bits.IS_HBR2_CAPABLE = true, 1041 .flags.bits.IS_HBR3_CAPABLE = true, 1042 .flags.bits.IS_TPS3_CAPABLE = true, 1043 .flags.bits.IS_TPS4_CAPABLE = true 1044 }; 1045 1046 struct link_encoder *dcn30_link_encoder_create( 1047 const struct encoder_init_data *enc_init_data) 1048 { 1049 struct dcn20_link_encoder *enc20 = 1050 kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); 1051 1052 if (!enc20) 1053 return NULL; 1054 1055 dcn30_link_encoder_construct(enc20, 1056 enc_init_data, 1057 &link_enc_feature, 1058 &link_enc_regs[enc_init_data->transmitter], 1059 &link_enc_aux_regs[enc_init_data->channel - 1], 1060 &link_enc_hpd_regs[enc_init_data->hpd_source], 1061 &le_shift, 1062 &le_mask); 1063 1064 return &enc20->enc10.base; 1065 } 1066 1067 struct panel_cntl *dcn30_panel_cntl_create(const struct 
panel_cntl_init_data *init_data) 1068 { 1069 struct dce_panel_cntl *panel_cntl = 1070 kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); 1071 1072 if (!panel_cntl) 1073 return NULL; 1074 1075 dce_panel_cntl_construct(panel_cntl, 1076 init_data, 1077 &panel_cntl_regs[init_data->inst], 1078 &panel_cntl_shift, 1079 &panel_cntl_mask); 1080 1081 return &panel_cntl->base; 1082 } 1083 1084 static void read_dce_straps( 1085 struct dc_context *ctx, 1086 struct resource_straps *straps) 1087 { 1088 generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), 1089 FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); 1090 1091 } 1092 1093 static struct audio *dcn30_create_audio( 1094 struct dc_context *ctx, unsigned int inst) 1095 { 1096 return dce_audio_create(ctx, inst, 1097 &audio_regs[inst], &audio_shift, &audio_mask); 1098 } 1099 1100 static struct vpg *dcn30_vpg_create( 1101 struct dc_context *ctx, 1102 uint32_t inst) 1103 { 1104 struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); 1105 1106 if (!vpg3) 1107 return NULL; 1108 1109 vpg3_construct(vpg3, ctx, inst, 1110 &vpg_regs[inst], 1111 &vpg_shift, 1112 &vpg_mask); 1113 1114 return &vpg3->base; 1115 } 1116 1117 static struct afmt *dcn30_afmt_create( 1118 struct dc_context *ctx, 1119 uint32_t inst) 1120 { 1121 struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); 1122 1123 if (!afmt3) 1124 return NULL; 1125 1126 afmt3_construct(afmt3, ctx, inst, 1127 &afmt_regs[inst], 1128 &afmt_shift, 1129 &afmt_mask); 1130 1131 return &afmt3->base; 1132 } 1133 1134 struct stream_encoder *dcn30_stream_encoder_create( 1135 enum engine_id eng_id, 1136 struct dc_context *ctx) 1137 { 1138 struct dcn10_stream_encoder *enc1; 1139 struct vpg *vpg; 1140 struct afmt *afmt; 1141 int vpg_inst; 1142 int afmt_inst; 1143 1144 /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ 1145 if (eng_id <= ENGINE_ID_DIGF) { 1146 vpg_inst = eng_id; 1147 afmt_inst = eng_id; 1148 } else 1149 return NULL; 1150 1151 enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); 1152 vpg = dcn30_vpg_create(ctx, vpg_inst); 1153 afmt = dcn30_afmt_create(ctx, afmt_inst); 1154 1155 if (!enc1 || !vpg || !afmt) 1156 return NULL; 1157 1158 dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, 1159 eng_id, vpg, afmt, 1160 &stream_enc_regs[eng_id], 1161 &se_shift, &se_mask); 1162 1163 return &enc1->base; 1164 } 1165 1166 struct dce_hwseq *dcn30_hwseq_create( 1167 struct dc_context *ctx) 1168 { 1169 struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); 1170 1171 if (hws) { 1172 hws->ctx = ctx; 1173 hws->regs = &hwseq_reg; 1174 hws->shifts = &hwseq_shift; 1175 hws->masks = &hwseq_mask; 1176 } 1177 return hws; 1178 } 1179 static const struct resource_create_funcs res_create_funcs = { 1180 .read_dce_straps = read_dce_straps, 1181 .create_audio = dcn30_create_audio, 1182 .create_stream_encoder = dcn30_stream_encoder_create, 1183 .create_hwseq = dcn30_hwseq_create, 1184 }; 1185 1186 static const struct resource_create_funcs res_create_maximus_funcs = { 1187 .read_dce_straps = NULL, 1188 .create_audio = NULL, 1189 .create_stream_encoder = NULL, 1190 .create_hwseq = dcn30_hwseq_create, 1191 }; 1192 1193 static void dcn30_resource_destruct(struct dcn30_resource_pool *pool) 1194 { 1195 unsigned int i; 1196 1197 for (i = 0; i < pool->base.stream_enc_count; i++) { 1198 if (pool->base.stream_enc[i] != NULL) { 1199 if (pool->base.stream_enc[i]->vpg != NULL) { 1200 
kfree(DCN30_VPG_FROM_VPG(pool->base.stream_enc[i]->vpg)); 1201 pool->base.stream_enc[i]->vpg = NULL; 1202 } 1203 if (pool->base.stream_enc[i]->afmt != NULL) { 1204 kfree(DCN30_AFMT_FROM_AFMT(pool->base.stream_enc[i]->afmt)); 1205 pool->base.stream_enc[i]->afmt = NULL; 1206 } 1207 kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); 1208 pool->base.stream_enc[i] = NULL; 1209 } 1210 } 1211 1212 for (i = 0; i < pool->base.res_cap->num_dsc; i++) { 1213 if (pool->base.dscs[i] != NULL) 1214 dcn20_dsc_destroy(&pool->base.dscs[i]); 1215 } 1216 1217 if (pool->base.mpc != NULL) { 1218 kfree(TO_DCN20_MPC(pool->base.mpc)); 1219 pool->base.mpc = NULL; 1220 } 1221 if (pool->base.hubbub != NULL) { 1222 kfree(pool->base.hubbub); 1223 pool->base.hubbub = NULL; 1224 } 1225 for (i = 0; i < pool->base.pipe_count; i++) { 1226 if (pool->base.dpps[i] != NULL) 1227 dcn30_dpp_destroy(&pool->base.dpps[i]); 1228 1229 if (pool->base.ipps[i] != NULL) 1230 pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); 1231 1232 if (pool->base.hubps[i] != NULL) { 1233 kfree(TO_DCN20_HUBP(pool->base.hubps[i])); 1234 pool->base.hubps[i] = NULL; 1235 } 1236 1237 if (pool->base.irqs != NULL) { 1238 dal_irq_service_destroy(&pool->base.irqs); 1239 } 1240 } 1241 1242 for (i = 0; i < pool->base.res_cap->num_ddc; i++) { 1243 if (pool->base.engines[i] != NULL) 1244 dce110_engine_destroy(&pool->base.engines[i]); 1245 if (pool->base.hw_i2cs[i] != NULL) { 1246 kfree(pool->base.hw_i2cs[i]); 1247 pool->base.hw_i2cs[i] = NULL; 1248 } 1249 if (pool->base.sw_i2cs[i] != NULL) { 1250 kfree(pool->base.sw_i2cs[i]); 1251 pool->base.sw_i2cs[i] = NULL; 1252 } 1253 } 1254 1255 for (i = 0; i < pool->base.res_cap->num_opp; i++) { 1256 if (pool->base.opps[i] != NULL) 1257 pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); 1258 } 1259 1260 for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { 1261 if (pool->base.timing_generators[i] != NULL) { 1262 kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); 1263 pool->base.timing_generators[i] = NULL; 1264 } 1265 } 1266 1267 for (i = 0; i < pool->base.res_cap->num_dwb; i++) { 1268 if (pool->base.dwbc[i] != NULL) { 1269 kfree(TO_DCN30_DWBC(pool->base.dwbc[i])); 1270 pool->base.dwbc[i] = NULL; 1271 } 1272 if (pool->base.mcif_wb[i] != NULL) { 1273 kfree(TO_DCN30_MMHUBBUB(pool->base.mcif_wb[i])); 1274 pool->base.mcif_wb[i] = NULL; 1275 } 1276 } 1277 1278 for (i = 0; i < pool->base.audio_count; i++) { 1279 if (pool->base.audios[i]) 1280 dce_aud_destroy(&pool->base.audios[i]); 1281 } 1282 1283 for (i = 0; i < pool->base.clk_src_count; i++) { 1284 if (pool->base.clock_sources[i] != NULL) { 1285 dcn20_clock_source_destroy(&pool->base.clock_sources[i]); 1286 pool->base.clock_sources[i] = NULL; 1287 } 1288 } 1289 1290 for (i = 0; i < pool->base.res_cap->num_mpc_3dlut; i++) { 1291 if (pool->base.mpc_lut[i] != NULL) { 1292 dc_3dlut_func_release(pool->base.mpc_lut[i]); 1293 pool->base.mpc_lut[i] = NULL; 1294 } 1295 if (pool->base.mpc_shaper[i] != NULL) { 1296 dc_transfer_func_release(pool->base.mpc_shaper[i]); 1297 pool->base.mpc_shaper[i] = NULL; 1298 } 1299 } 1300 1301 if (pool->base.dp_clock_source != NULL) { 1302 dcn20_clock_source_destroy(&pool->base.dp_clock_source); 1303 pool->base.dp_clock_source = NULL; 1304 } 1305 1306 for (i = 0; i < pool->base.pipe_count; i++) { 1307 if (pool->base.multiple_abms[i] != NULL) 1308 dce_abm_destroy(&pool->base.multiple_abms[i]); 1309 } 1310 1311 if (pool->base.dccg != NULL) 1312 dcn_dccg_destroy(&pool->base.dccg); 1313 } 1314 1315 struct hubp 
*dcn30_hubp_create( 1316 struct dc_context *ctx, 1317 uint32_t inst) 1318 { 1319 struct dcn20_hubp *hubp2 = 1320 kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); 1321 1322 if (!hubp2) 1323 return NULL; 1324 1325 if (hubp3_construct(hubp2, ctx, inst, 1326 &hubp_regs[inst], &hubp_shift, &hubp_mask)) 1327 return &hubp2->base; 1328 1329 BREAK_TO_DEBUGGER(); 1330 kfree(hubp2); 1331 return NULL; 1332 } 1333 1334 bool dcn30_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) 1335 { 1336 int i; 1337 uint32_t pipe_count = pool->res_cap->num_dwb; 1338 1339 for (i = 0; i < pipe_count; i++) { 1340 struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), 1341 GFP_KERNEL); 1342 1343 if (!dwbc30) { 1344 dm_error("DC: failed to create dwbc30!\n"); 1345 return false; 1346 } 1347 1348 dcn30_dwbc_construct(dwbc30, ctx, 1349 &dwbc30_regs[i], 1350 &dwbc30_shift, 1351 &dwbc30_mask, 1352 i); 1353 1354 pool->dwbc[i] = &dwbc30->base; 1355 } 1356 return true; 1357 } 1358 1359 bool dcn30_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) 1360 { 1361 int i; 1362 uint32_t pipe_count = pool->res_cap->num_dwb; 1363 1364 for (i = 0; i < pipe_count; i++) { 1365 struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), 1366 GFP_KERNEL); 1367 1368 if (!mcif_wb30) { 1369 dm_error("DC: failed to create mcif_wb30!\n"); 1370 return false; 1371 } 1372 1373 dcn30_mmhubbub_construct(mcif_wb30, ctx, 1374 &mcif_wb30_regs[i], 1375 &mcif_wb30_shift, 1376 &mcif_wb30_mask, 1377 i); 1378 1379 pool->mcif_wb[i] = &mcif_wb30->base; 1380 } 1381 return true; 1382 } 1383 1384 static struct display_stream_compressor *dcn30_dsc_create( 1385 struct dc_context *ctx, uint32_t inst) 1386 { 1387 struct dcn20_dsc *dsc = 1388 kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); 1389 1390 if (!dsc) { 1391 BREAK_TO_DEBUGGER(); 1392 return NULL; 1393 } 1394 1395 dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); 1396 return &dsc->base; 1397 } 1398 1399 enum dc_status dcn30_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream) 1400 { 1401 1402 return dcn20_add_stream_to_ctx(dc, new_ctx, dc_stream); 1403 } 1404 1405 static void dcn30_destroy_resource_pool(struct resource_pool **pool) 1406 { 1407 struct dcn30_resource_pool *dcn30_pool = TO_DCN30_RES_POOL(*pool); 1408 1409 dcn30_resource_destruct(dcn30_pool); 1410 kfree(dcn30_pool); 1411 *pool = NULL; 1412 } 1413 1414 static struct clock_source *dcn30_clock_source_create( 1415 struct dc_context *ctx, 1416 struct dc_bios *bios, 1417 enum clock_source_id id, 1418 const struct dce110_clk_src_regs *regs, 1419 bool dp_clk_src) 1420 { 1421 struct dce110_clk_src *clk_src = 1422 kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); 1423 1424 if (!clk_src) 1425 return NULL; 1426 1427 if (dcn3_clk_src_construct(clk_src, ctx, bios, id, 1428 regs, &cs_shift, &cs_mask)) { 1429 clk_src->base.dp_clk_src = dp_clk_src; 1430 return &clk_src->base; 1431 } 1432 1433 BREAK_TO_DEBUGGER(); 1434 return NULL; 1435 } 1436 1437 int dcn30_populate_dml_pipes_from_context( 1438 struct dc *dc, struct dc_state *context, 1439 display_e2e_pipe_params_st *pipes) 1440 { 1441 int i, pipe_cnt; 1442 struct resource_context *res_ctx = &context->res_ctx; 1443 1444 dcn20_populate_dml_pipes_from_context(dc, context, pipes); 1445 1446 for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { 1447 if (!res_ctx->pipe_ctx[i].stream) 1448 continue; 1449 1450 pipes[pipe_cnt++].pipe.scale_ratio_depth.lb_depth = 1451 dm_lb_16; 1452 } 1453 1454 return 
pipe_cnt; 1455 } 1456 1457 void dcn30_populate_dml_writeback_from_context( 1458 struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) 1459 { 1460 int pipe_cnt, i, j; 1461 double max_calc_writeback_dispclk; 1462 double writeback_dispclk; 1463 struct writeback_st dout_wb; 1464 1465 for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { 1466 struct dc_stream_state *stream = res_ctx->pipe_ctx[i].stream; 1467 1468 if (!stream) 1469 continue; 1470 max_calc_writeback_dispclk = 0; 1471 1472 /* Set writeback information */ 1473 pipes[pipe_cnt].dout.wb_enable = 0; 1474 pipes[pipe_cnt].dout.num_active_wb = 0; 1475 for (j = 0; j < stream->num_wb_info; j++) { 1476 struct dc_writeback_info *wb_info = &stream->writeback_info[j]; 1477 1478 if (wb_info->wb_enabled && wb_info->writeback_source_plane && 1479 (wb_info->writeback_source_plane == res_ctx->pipe_ctx[i].plane_state)) { 1480 pipes[pipe_cnt].dout.wb_enable = 1; 1481 pipes[pipe_cnt].dout.num_active_wb++; 1482 dout_wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_en ? 1483 wb_info->dwb_params.cnv_params.crop_height : 1484 wb_info->dwb_params.cnv_params.src_height; 1485 dout_wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_en ? 1486 wb_info->dwb_params.cnv_params.crop_width : 1487 wb_info->dwb_params.cnv_params.src_width; 1488 dout_wb.wb_dst_width = wb_info->dwb_params.dest_width; 1489 dout_wb.wb_dst_height = wb_info->dwb_params.dest_height; 1490 1491 /* For IP that doesn't support WB scaling, set h/v taps to 1 to avoid DML validation failure */ 1492 if (dc->dml.ip.writeback_max_hscl_taps > 1) { 1493 dout_wb.wb_htaps_luma = wb_info->dwb_params.scaler_taps.h_taps; 1494 dout_wb.wb_vtaps_luma = wb_info->dwb_params.scaler_taps.v_taps; 1495 } else { 1496 dout_wb.wb_htaps_luma = 1; 1497 dout_wb.wb_vtaps_luma = 1; 1498 } 1499 dout_wb.wb_htaps_chroma = 0; 1500 dout_wb.wb_vtaps_chroma = 0; 1501 dout_wb.wb_hratio = wb_info->dwb_params.cnv_params.crop_en ? 1502 (double)wb_info->dwb_params.cnv_params.crop_width / 1503 (double)wb_info->dwb_params.dest_width : 1504 (double)wb_info->dwb_params.cnv_params.src_width / 1505 (double)wb_info->dwb_params.dest_width; 1506 dout_wb.wb_vratio = wb_info->dwb_params.cnv_params.crop_en ? 
1507 (double)wb_info->dwb_params.cnv_params.crop_height / 1508 (double)wb_info->dwb_params.dest_height : 1509 (double)wb_info->dwb_params.cnv_params.src_height / 1510 (double)wb_info->dwb_params.dest_height; 1511 if (wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB || 1512 wb_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA) 1513 dout_wb.wb_pixel_format = dm_444_64; 1514 else 1515 dout_wb.wb_pixel_format = dm_444_32; 1516 1517 /* Workaround for cases where multiple writebacks are connected to same plane 1518 * In which case, need to compute worst case and set the associated writeback parameters 1519 * This workaround is necessary due to DML computation assuming only 1 set of writeback 1520 * parameters per pipe 1521 */ 1522 writeback_dispclk = dml30_CalculateWriteBackDISPCLK( 1523 dout_wb.wb_pixel_format, 1524 pipes[pipe_cnt].pipe.dest.pixel_rate_mhz, 1525 dout_wb.wb_hratio, 1526 dout_wb.wb_vratio, 1527 dout_wb.wb_htaps_luma, 1528 dout_wb.wb_vtaps_luma, 1529 dout_wb.wb_src_width, 1530 dout_wb.wb_dst_width, 1531 pipes[pipe_cnt].pipe.dest.htotal, 1532 dc->current_state->bw_ctx.dml.ip.writeback_line_buffer_buffer_size); 1533 1534 if (writeback_dispclk > max_calc_writeback_dispclk) { 1535 max_calc_writeback_dispclk = writeback_dispclk; 1536 pipes[pipe_cnt].dout.wb = dout_wb; 1537 } 1538 } 1539 } 1540 1541 pipe_cnt++; 1542 } 1543 1544 } 1545 1546 unsigned int dcn30_calc_max_scaled_time( 1547 unsigned int time_per_pixel, 1548 enum mmhubbub_wbif_mode mode, 1549 unsigned int urgent_watermark) 1550 { 1551 unsigned int time_per_byte = 0; 1552 unsigned int total_free_entry = 0xb40; 1553 unsigned int buf_lh_capability; 1554 unsigned int max_scaled_time; 1555 1556 if (mode == PACKED_444) /* packed mode 32 bpp */ 1557 time_per_byte = time_per_pixel/4; 1558 else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ 1559 time_per_byte = time_per_pixel/8; 1560 1561 if (time_per_byte == 0) 1562 time_per_byte = 1; 1563 1564 buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/ 1565 max_scaled_time = buf_lh_capability - urgent_watermark; 1566 return max_scaled_time; 1567 } 1568 1569 void dcn30_set_mcif_arb_params( 1570 struct dc *dc, 1571 struct dc_state *context, 1572 display_e2e_pipe_params_st *pipes, 1573 int pipe_cnt) 1574 { 1575 enum mmhubbub_wbif_mode wbif_mode; 1576 struct display_mode_lib *dml = &context->bw_ctx.dml; 1577 struct mcif_arb_params *wb_arb_params; 1578 int i, j, k, dwb_pipe; 1579 1580 /* Writeback MCIF_WB arbitration parameters */ 1581 dwb_pipe = 0; 1582 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1583 1584 if (!context->res_ctx.pipe_ctx[i].stream) 1585 continue; 1586 1587 for (j = 0; j < MAX_DWB_PIPES; j++) { 1588 struct dc_writeback_info *writeback_info = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j]; 1589 1590 if (writeback_info->wb_enabled == false) 1591 continue; 1592 1593 //wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params; 1594 wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe]; 1595 1596 if (writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_ARGB || 1597 writeback_info->dwb_params.cnv_params.fc_out_format == DWB_OUT_FORMAT_64BPP_RGBA) 1598 wbif_mode = PACKED_444_FP16; 1599 else 1600 wbif_mode = PACKED_444; 1601 1602 for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) { 1603 wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(dml, pipes, 
pipe_cnt) * 1000; 1604 wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(dml, pipes, pipe_cnt) * 1000; 1605 } 1606 wb_arb_params->time_per_pixel = (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; /* time_per_pixel should be in u6.6 format */ 1607 wb_arb_params->slice_lines = 32; 1608 wb_arb_params->arbitration_slice = 2; /* irrelevant since there is no YUV output */ 1609 wb_arb_params->max_scaled_time = dcn30_calc_max_scaled_time(wb_arb_params->time_per_pixel, 1610 wbif_mode, 1611 wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */ 1612 wb_arb_params->dram_speed_change_duration = dml->vba.WritebackAllowDRAMClockChangeEndPosition[j] * pipes[0].clks_cfg.refclk_mhz; /* num_clock_cycles = us * MHz */ 1613 1614 dwb_pipe++; 1615 1616 if (dwb_pipe >= MAX_DWB_PIPES) 1617 return; 1618 } 1619 if (dwb_pipe >= MAX_DWB_PIPES) 1620 return; 1621 } 1622 1623 } 1624 1625 static struct dc_cap_funcs cap_funcs = { 1626 .get_dcc_compression_cap = dcn20_get_dcc_compression_cap 1627 }; 1628 1629 bool dcn30_acquire_post_bldn_3dlut( 1630 struct resource_context *res_ctx, 1631 const struct resource_pool *pool, 1632 int mpcc_id, 1633 struct dc_3dlut **lut, 1634 struct dc_transfer_func **shaper) 1635 { 1636 int i; 1637 bool ret = false; 1638 union dc_3dlut_state *state; 1639 1640 ASSERT(*lut == NULL && *shaper == NULL); 1641 *lut = NULL; 1642 *shaper = NULL; 1643 1644 for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { 1645 if (!res_ctx->is_mpc_3dlut_acquired[i]) { 1646 *lut = pool->mpc_lut[i]; 1647 *shaper = pool->mpc_shaper[i]; 1648 state = &pool->mpc_lut[i]->state; 1649 res_ctx->is_mpc_3dlut_acquired[i] = true; 1650 state->bits.rmu_idx_valid = 1; 1651 state->bits.rmu_mux_num = i; 1652 if (state->bits.rmu_mux_num == 0) 1653 state->bits.mpc_rmu0_mux = mpcc_id; 1654 else if (state->bits.rmu_mux_num == 1) 1655 state->bits.mpc_rmu1_mux = mpcc_id; 1656 else if (state->bits.rmu_mux_num == 2) 1657 state->bits.mpc_rmu2_mux = mpcc_id; 1658 ret = true; 1659 break; 1660 } 1661 } 1662 return ret; 1663 } 1664 1665 bool dcn30_release_post_bldn_3dlut( 1666 struct resource_context *res_ctx, 1667 const struct resource_pool *pool, 1668 struct dc_3dlut **lut, 1669 struct dc_transfer_func **shaper) 1670 { 1671 int i; 1672 bool ret = false; 1673 1674 for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { 1675 if (pool->mpc_lut[i] == *lut && pool->mpc_shaper[i] == *shaper) { 1676 res_ctx->is_mpc_3dlut_acquired[i] = false; 1677 pool->mpc_lut[i]->state.raw = 0; 1678 *lut = NULL; 1679 *shaper = NULL; 1680 ret = true; 1681 break; 1682 } 1683 } 1684 return ret; 1685 } 1686 1687 #define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) 1688 #define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) 1689 1690 static bool is_soc_bounding_box_valid(struct dc *dc) 1691 { 1692 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; 1693 1694 if (ASICREV_IS_SIENNA_CICHLID_P(hw_internal_rev)) 1695 return true; 1696 1697 return false; 1698 } 1699 1700 static bool init_soc_bounding_box(struct dc *dc, 1701 struct dcn30_resource_pool *pool) 1702 { 1703 const struct gpu_info_soc_bounding_box_v1_0 *bb = dc->soc_bounding_box; 1704 struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_0_soc; 1705 struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_0_ip; 1706 1707 DC_LOGGER_INIT(dc->ctx->logger); 1708 1709 if (!bb && !is_soc_bounding_box_valid(dc)) { 1710 DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); 1711 return false; 1712 } 1713 1714 if (bb && 
!is_soc_bounding_box_valid(dc)) { 1715 int i; 1716 1717 dcn3_0_soc.sr_exit_time_us = 1718 fixed16_to_double_to_cpu(bb->sr_exit_time_us); 1719 dcn3_0_soc.sr_enter_plus_exit_time_us = 1720 fixed16_to_double_to_cpu(bb->sr_enter_plus_exit_time_us); 1721 dcn3_0_soc.urgent_latency_us = 1722 fixed16_to_double_to_cpu(bb->urgent_latency_us); 1723 dcn3_0_soc.urgent_latency_pixel_data_only_us = 1724 fixed16_to_double_to_cpu(bb->urgent_latency_pixel_data_only_us); 1725 dcn3_0_soc.urgent_latency_pixel_mixed_with_vm_data_us = 1726 fixed16_to_double_to_cpu(bb->urgent_latency_pixel_mixed_with_vm_data_us); 1727 dcn3_0_soc.urgent_latency_vm_data_only_us = 1728 fixed16_to_double_to_cpu(bb->urgent_latency_vm_data_only_us); 1729 dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_only_bytes = 1730 le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_only_bytes); 1731 dcn3_0_soc.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 1732 le32_to_cpu(bb->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes); 1733 dcn3_0_soc.urgent_out_of_order_return_per_channel_vm_only_bytes = 1734 le32_to_cpu(bb->urgent_out_of_order_return_per_channel_vm_only_bytes); 1735 dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 1736 fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_only); 1737 dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 1738 fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm); 1739 dcn3_0_soc.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 1740 fixed16_to_double_to_cpu(bb->pct_ideal_dram_sdp_bw_after_urgent_vm_only); 1741 dcn3_0_soc.max_avg_sdp_bw_use_normal_percent = 1742 fixed16_to_double_to_cpu(bb->max_avg_sdp_bw_use_normal_percent); 1743 dcn3_0_soc.max_avg_dram_bw_use_normal_percent = 1744 fixed16_to_double_to_cpu(bb->max_avg_dram_bw_use_normal_percent); 1745 dcn3_0_soc.writeback_latency_us = 1746 fixed16_to_double_to_cpu(bb->writeback_latency_us); 1747 dcn3_0_soc.ideal_dram_bw_after_urgent_percent = 1748 fixed16_to_double_to_cpu(bb->ideal_dram_bw_after_urgent_percent); 1749 dcn3_0_soc.max_request_size_bytes = 1750 le32_to_cpu(bb->max_request_size_bytes); 1751 dcn3_0_soc.dram_channel_width_bytes = 1752 le32_to_cpu(bb->dram_channel_width_bytes); 1753 dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes = 1754 le32_to_cpu(bb->fabric_datapath_to_dcn_data_return_bytes); 1755 dcn3_0_soc.dcn_downspread_percent = 1756 fixed16_to_double_to_cpu(bb->dcn_downspread_percent); 1757 dcn3_0_soc.downspread_percent = 1758 fixed16_to_double_to_cpu(bb->downspread_percent); 1759 dcn3_0_soc.dram_page_open_time_ns = 1760 fixed16_to_double_to_cpu(bb->dram_page_open_time_ns); 1761 dcn3_0_soc.dram_rw_turnaround_time_ns = 1762 fixed16_to_double_to_cpu(bb->dram_rw_turnaround_time_ns); 1763 dcn3_0_soc.dram_return_buffer_per_channel_bytes = 1764 le32_to_cpu(bb->dram_return_buffer_per_channel_bytes); 1765 dcn3_0_soc.round_trip_ping_latency_dcfclk_cycles = 1766 le32_to_cpu(bb->round_trip_ping_latency_dcfclk_cycles); 1767 dcn3_0_soc.urgent_out_of_order_return_per_channel_bytes = 1768 le32_to_cpu(bb->urgent_out_of_order_return_per_channel_bytes); 1769 dcn3_0_soc.channel_interleave_bytes = 1770 le32_to_cpu(bb->channel_interleave_bytes); 1771 dcn3_0_soc.num_banks = 1772 le32_to_cpu(bb->num_banks); 1773 dcn3_0_soc.num_chans = 1774 le32_to_cpu(bb->num_chans); 1775 dcn3_0_soc.gpuvm_min_page_size_bytes = 1776 le32_to_cpu(bb->vmm_page_size_bytes); 1777 dcn3_0_soc.dram_clock_change_latency_us = 1778 fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us); 1779 
dcn3_0_soc.writeback_dram_clock_change_latency_us = 1780 fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us); 1781 dcn3_0_soc.return_bus_width_bytes = 1782 le32_to_cpu(bb->return_bus_width_bytes); 1783 dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = 1784 le32_to_cpu(bb->dispclk_dppclk_vco_speed_mhz); 1785 dcn3_0_soc.xfc_bus_transport_time_us = 1786 le32_to_cpu(bb->xfc_bus_transport_time_us); 1787 dcn3_0_soc.xfc_xbuf_latency_tolerance_us = 1788 le32_to_cpu(bb->xfc_xbuf_latency_tolerance_us); 1789 dcn3_0_soc.use_urgent_burst_bw = 1790 le32_to_cpu(bb->use_urgent_burst_bw); 1791 dcn3_0_soc.num_states = 1792 le32_to_cpu(bb->num_states); 1793 1794 for (i = 0; i < dcn3_0_soc.num_states; i++) { 1795 dcn3_0_soc.clock_limits[i].state = 1796 le32_to_cpu(bb->clock_limits[i].state); 1797 dcn3_0_soc.clock_limits[i].dcfclk_mhz = 1798 fixed16_to_double_to_cpu(bb->clock_limits[i].dcfclk_mhz); 1799 dcn3_0_soc.clock_limits[i].fabricclk_mhz = 1800 fixed16_to_double_to_cpu(bb->clock_limits[i].fabricclk_mhz); 1801 dcn3_0_soc.clock_limits[i].dispclk_mhz = 1802 fixed16_to_double_to_cpu(bb->clock_limits[i].dispclk_mhz); 1803 dcn3_0_soc.clock_limits[i].dppclk_mhz = 1804 fixed16_to_double_to_cpu(bb->clock_limits[i].dppclk_mhz); 1805 dcn3_0_soc.clock_limits[i].phyclk_mhz = 1806 fixed16_to_double_to_cpu(bb->clock_limits[i].phyclk_mhz); 1807 dcn3_0_soc.clock_limits[i].socclk_mhz = 1808 fixed16_to_double_to_cpu(bb->clock_limits[i].socclk_mhz); 1809 dcn3_0_soc.clock_limits[i].dscclk_mhz = 1810 fixed16_to_double_to_cpu(bb->clock_limits[i].dscclk_mhz); 1811 dcn3_0_soc.clock_limits[i].dram_speed_mts = 1812 fixed16_to_double_to_cpu(bb->clock_limits[i].dram_speed_mts); 1813 } 1814 } 1815 1816 loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; 1817 loaded_ip->max_num_dpp = pool->base.pipe_count; 1818 loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; 1819 dcn20_patch_bounding_box(dc, loaded_bb); 1820 return true; 1821 } 1822 1823 static bool dcn30_split_stream_for_mpc_or_odm( 1824 const struct dc *dc, 1825 struct resource_context *res_ctx, 1826 struct pipe_ctx *pri_pipe, 1827 struct pipe_ctx *sec_pipe, 1828 bool odm) 1829 { 1830 int pipe_idx = sec_pipe->pipe_idx; 1831 const struct resource_pool *pool = dc->res_pool; 1832 1833 *sec_pipe = *pri_pipe; 1834 1835 sec_pipe->pipe_idx = pipe_idx; 1836 sec_pipe->plane_res.mi = pool->mis[pipe_idx]; 1837 sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; 1838 sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; 1839 sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; 1840 sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; 1841 sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; 1842 sec_pipe->stream_res.dsc = NULL; 1843 if (odm) { 1844 if (pri_pipe->next_odm_pipe) { 1845 ASSERT(pri_pipe->next_odm_pipe != sec_pipe); 1846 sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe; 1847 sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe; 1848 } 1849 pri_pipe->next_odm_pipe = sec_pipe; 1850 sec_pipe->prev_odm_pipe = pri_pipe; 1851 ASSERT(sec_pipe->top_pipe == NULL); 1852 1853 sec_pipe->stream_res.opp = pool->opps[pipe_idx]; 1854 if (sec_pipe->stream->timing.flags.DSC == 1) { 1855 dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); 1856 ASSERT(sec_pipe->stream_res.dsc); 1857 if (sec_pipe->stream_res.dsc == NULL) 1858 return false; 1859 } 1860 } else { 1861 if (pri_pipe->bottom_pipe) { 1862 ASSERT(pri_pipe->bottom_pipe != sec_pipe); 1863 sec_pipe->bottom_pipe = pri_pipe->bottom_pipe; 1864 sec_pipe->bottom_pipe->top_pipe = sec_pipe; 1865 } 1866 
static bool dcn30_internal_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *vlevel_out,
		bool fast_validate)
{
	bool out = false;
	bool repopulate_pipes = false;
	int split[MAX_PIPES] = { 0 };
	bool merge[MAX_PIPES] = { false };
	bool newly_split[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	ASSERT(pipes);
	if (!pipes)
		return false;

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);

	if (!pipe_cnt) {
		out = true;
		goto validate_out;
	}

	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);

	if (!fast_validate) {
		/*
		 * DML favors voltage over p-state, but we're more interested in
		 * supporting p-state over voltage. We can't support p-state in
		 * prefetch mode > 0 so try capping the prefetch mode to start.
		 */
		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
			dm_allow_self_refresh_and_mclk_switch;
		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
		/* This may adjust vlevel and maxMpcComb */
		if (vlevel < context->bw_ctx.dml.soc.num_states)
			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
	}
	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
		/*
		 * If mode is unsupported or there's still no p-state support then
		 * fall back to favoring voltage.
		 *
		 * We don't actually support prefetch mode 2, so require that we
		 * at least support prefetch mode 1.
		 */
		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
			dm_allow_self_refresh;

		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
		if (vlevel < context->bw_ctx.dml.soc.num_states) {
			memset(split, 0, sizeof(split));
			memset(merge, 0, sizeof(merge));
			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
		}
	}

	dml_log_mode_support_params(&context->bw_ctx.dml);

	/* TODO: Check why the calculated vlevel fails validation for the resolutions below */
	if (context->res_ctx.pipe_ctx[0].stream != NULL) {
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 640 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 480)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 800)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 768)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 1280 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1024)
			vlevel = 0;
		if (context->res_ctx.pipe_ctx[0].stream->timing.h_addressable == 2048 && context->res_ctx.pipe_ctx[0].stream->timing.v_addressable == 1536)
			vlevel = 0;
	}

	if (vlevel == context->bw_ctx.dml.soc.num_states)
		goto validate_fail;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;

		if (!pipe->stream)
			continue;

		/* We only support full screen mpo with ODM */
		if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
				&& pipe->plane_state && mpo_pipe
				&& memcmp(&mpo_pipe->plane_res.scl_data.recout,
						&pipe->plane_res.scl_data.recout,
						sizeof(struct rect)) != 0) {
			ASSERT(mpo_pipe->plane_state != pipe->plane_state);
			goto validate_fail;
		}
		pipe_idx++;
	}

	/* merge pipes if necessary */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* skip pipes that don't need merging */
		if (!merge[i])
			continue;

		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
		if (pipe->prev_odm_pipe) {
			/* split off odm pipe */
			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
			if (pipe->next_odm_pipe)
				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;

			pipe->bottom_pipe = NULL;
			pipe->next_odm_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			pipe->top_pipe = NULL;
			pipe->prev_odm_pipe = NULL;
			if (pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
			struct pipe_ctx *top_pipe = pipe->top_pipe;
			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;

			top_pipe->bottom_pipe = bottom_pipe;
			if (bottom_pipe)
				bottom_pipe->top_pipe = top_pipe;

			pipe->top_pipe = NULL;
			pipe->bottom_pipe = NULL;
			pipe->plane_state = NULL;
			pipe->stream = NULL;
			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
		} else
			ASSERT(0); /* Should never try to merge master pipe */

	}

	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = NULL;
		bool odm;

		if (!pipe->stream || newly_split[i])
			continue;

		pipe_idx++;
		odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;

		if (!pipe->plane_state && !odm)
			continue;

		if (split[i]) {
			hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
			ASSERT(hsplit_pipe);
			if (!hsplit_pipe)
				goto validate_fail;

			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					pipe, hsplit_pipe, odm))
				goto validate_fail;

			newly_split[hsplit_pipe->pipe_idx] = true;
			repopulate_pipes = true;
		}
		if (split[i] == 4) {
			struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);

			ASSERT(pipe_4to1);
			if (!pipe_4to1)
				goto validate_fail;
			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					pipe, pipe_4to1, odm))
				goto validate_fail;
			newly_split[pipe_4to1->pipe_idx] = true;

			pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
			ASSERT(pipe_4to1);
			if (!pipe_4to1)
				goto validate_fail;
			if (!dcn30_split_stream_for_mpc_or_odm(
					dc, &context->res_ctx,
					hsplit_pipe, pipe_4to1, odm))
				goto validate_fail;
			newly_split[pipe_4to1->pipe_idx] = true;
		}
		if (odm)
			dcn20_build_mapped_resource(dc, context, pipe->stream);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!resource_build_scaling_params(pipe))
				goto validate_fail;
		}
	}

	/* Actual dsc count per stream dsc validation */
	if (!dcn20_validate_dsc(dc, context)) {
		vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
		goto validate_fail;
	}

	if (repopulate_pipes)
		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes);
	*vlevel_out = vlevel;
	*pipe_cnt_out = pipe_cnt;

	out = true;
	goto validate_out;

validate_fail:
	out = false;

validate_out:
	return out;
}

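/*
 * Fill the four DCN watermark sets from DML results.  Sets B, C and D are
 * computed first, each with its latency/self-refresh overrides taken from the
 * clock manager's watermark table when valid; set A is computed last so that
 * the calculations that follow (DLG parameters) are based on set A, as noted
 * in the per-set comments below.
 */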
static void dcn30_calculate_wm(
		struct dc *dc, struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt,
		int vlevel)
{
	int i, pipe_idx;
	double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];

	if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
		dcfclk = context->bw_ctx.dml.soc.min_dcfclk;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;

	/* Set B:
	 * DCFCLK: 1GHz or min required above 1GHz
	 * FCLK/UCLK: Max
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
		if (vlevel == 0) {
			pipes[0].clks_cfg.voltage = 1;
			pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
		}
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;

	/* Set C:
	 * DCFCLK: Min Required
	 * FCLK (proportional to UCLK): 1GHz or Max
	 * pstate latency overridden to 5us
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	/* Set D:
	 * DCFCLK: Min Required
	 * FCLK (proportional to UCLK): 1GHz or Max
	 * sr_enter_exit = 4us, sr_exit = 2us
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	/* Set A:
	 * DCFCLK: Min Required
	 * FCLK (proportional to UCLK): 1GHz or Max
	 *
	 * Set A calculated last so that following calculations are based on Set A
	 */
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
	}
	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);

		if (dc->config.forced_clocks) {
			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
		}
		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;

		pipe_idx++;
	}
}

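/*
 * Top-level DCN 3.0 bandwidth validation entry point: allocate a scratch DML
 * pipe array, run dcn30_internal_validate_bw(), and, unless fast_validate is
 * requested, derive watermarks and DLG parameters for the validated voltage
 * level.
 */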
bool dcn30_validate_bandwidth(struct dc *dc,
		struct dc_state *context,
		bool fast_validate)
{
	bool out = false;

	BW_VAL_TRACE_SETUP();

	int vlevel = 0;
	int pipe_cnt = 0;
	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
	DC_LOGGER_INIT(dc->ctx->logger);

	BW_VAL_TRACE_COUNT();

	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);

	if (pipe_cnt == 0)
		goto validate_out;

	if (!out)
		goto validate_fail;

	BW_VAL_TRACE_END_VOLTAGE_LEVEL();

	if (fast_validate) {
		BW_VAL_TRACE_SKIP(fast);
		goto validate_out;
	}

	dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);

	BW_VAL_TRACE_END_WATERMARKS();

	goto validate_out;

validate_fail:
	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));

	BW_VAL_TRACE_SKIP(fail);
	out = false;

validate_out:
	kfree(pipes);

	BW_VAL_TRACE_FINISH();

	return out;
}

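/*
 * For a given UCLK (in MT/s), estimate the DCFCLK and FCLK needed to sustain
 * the achievable DRAM bandwidth.  That bandwidth is taken as the smaller of
 *
 *   uclk_mts * num_chans * dram_channel_width_bytes * max_avg_dram_bw% / 100
 *   uclk_mts * num_chans * dram_channel_width_bytes * max_avg_sdp_bw% / 100
 *
 * and each clock is then sized so that its return path (fabric datapath or
 * return bus width), derated by max_avg_sdp_bw%, can carry that bandwidth.
 */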
static void get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
		unsigned int *optimal_dcfclk,
		unsigned int *optimal_fclk)
{
	double bw_from_dram, bw_from_dram1, bw_from_dram2;

	bw_from_dram1 = uclk_mts * dcn3_0_soc.num_chans *
		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_dram_bw_use_normal_percent / 100);
	bw_from_dram2 = uclk_mts * dcn3_0_soc.num_chans *
		dcn3_0_soc.dram_channel_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100);

	bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;

	if (optimal_fclk)
		*optimal_fclk = bw_from_dram /
			(dcn3_0_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));

	if (optimal_dcfclk)
		*optimal_dcfclk = bw_from_dram /
			(dcn3_0_soc.return_bus_width_bytes * (dcn3_0_soc.max_avg_sdp_bw_use_normal_percent / 100));
}

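/*
 * Rebuild the dcn3_0_soc clock-limit states from the clock table reported in
 * bw_params and re-initialize DML with the updated bounding box.  The state
 * list is built by merging two ascending sequences: the fixed DCFCLK STA
 * targets (each paired with the first UCLK whose optimal DCFCLK exceeds the
 * target) and the optimal DCFCLK computed per UCLK, capped at the maximum
 * DCFCLK in the clock table.  Clocks not provided by bw_params (SOCCLK,
 * DSCCLK, PHYCLK_D18, DTBCLK) are carried over from state 0.
 */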
static void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
	unsigned int i, j;
	unsigned int num_states = 0;

	unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
	unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
	unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};

	unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
	unsigned int num_dcfclk_sta_targets = 4;
	unsigned int num_uclk_states;

	if (dc->ctx->dc_bios->vram_info.num_chans)
		dcn3_0_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;

	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
		dcn3_0_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;

	dcn3_0_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;

	if (bw_params->clk_table.entries[0].memclk_mhz) {

		if (bw_params->clk_table.entries[1].dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
			dcfclk_sta_targets[num_dcfclk_sta_targets] = bw_params->clk_table.entries[1].dcfclk_mhz;
			num_dcfclk_sta_targets++;
		} else if (bw_params->clk_table.entries[1].dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
			// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
			for (i = 0; i < num_dcfclk_sta_targets; i++) {
				if (dcfclk_sta_targets[i] > bw_params->clk_table.entries[1].dcfclk_mhz) {
					dcfclk_sta_targets[i] = bw_params->clk_table.entries[1].dcfclk_mhz;
					break;
				}
			}
			// Update size of array since we "removed" duplicates
			num_dcfclk_sta_targets = i + 1;
		}

		num_uclk_states = bw_params->clk_table.num_entries;

		// Calculate optimal dcfclk for each uclk
		for (i = 0; i < num_uclk_states; i++) {
			get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
					&optimal_dcfclk_for_uclk[i], NULL);
			if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
				optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
			}
		}

		// Calculate optimal uclk for each dcfclk sta target
		for (i = 0; i < num_dcfclk_sta_targets; i++) {
			for (j = 0; j < num_uclk_states; j++) {
				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
					optimal_uclk_for_dcfclk_sta_targets[i] =
							bw_params->clk_table.entries[j].memclk_mhz * 16;
					break;
				}
			}
		}

		i = 0;
		j = 0;
		// create the final dcfclk and uclk table
		while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
			if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
			} else {
				if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
					dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
					dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
				} else {
					j = num_uclk_states;
				}
			}
		}

		while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
			dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
			dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
		}

		while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
				optimal_dcfclk_for_uclk[j] <= bw_params->clk_table.entries[1].dcfclk_mhz) {
			dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
			dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
		}

		dcn3_0_soc.num_states = num_states;
		for (i = 0; i < dcn3_0_soc.num_states; i++) {
			dcn3_0_soc.clock_limits[i].state = i;
			dcn3_0_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
			dcn3_0_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
			dcn3_0_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];

			/* Fill all states with max values of all other clocks */
			dcn3_0_soc.clock_limits[i].dispclk_mhz = bw_params->clk_table.entries[1].dispclk_mhz;
			dcn3_0_soc.clock_limits[i].dppclk_mhz = bw_params->clk_table.entries[1].dppclk_mhz;
			dcn3_0_soc.clock_limits[i].phyclk_mhz = bw_params->clk_table.entries[1].phyclk_mhz;
			dcn3_0_soc.clock_limits[i].dtbclk_mhz = dcn3_0_soc.clock_limits[0].dtbclk_mhz;
			/* These clocks cannot come from bw_params, always fill from dcn3_0_soc[1] */
			/* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */
			dcn3_0_soc.clock_limits[i].phyclk_d18_mhz = dcn3_0_soc.clock_limits[0].phyclk_d18_mhz;
			dcn3_0_soc.clock_limits[i].socclk_mhz = dcn3_0_soc.clock_limits[0].socclk_mhz;
			dcn3_0_soc.clock_limits[i].dscclk_mhz = dcn3_0_soc.clock_limits[0].dscclk_mhz;
		}
		/* re-init DML with updated bb */
		dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
		if (dc->current_state)
			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
	}

	/* re-init DML with updated bb */
	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
	if (dc->current_state)
		dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
}

static struct resource_funcs dcn30_res_pool_funcs = {
	.destroy = dcn30_destroy_resource_pool,
	.link_enc_create = dcn30_link_encoder_create,
	.panel_cntl_create = dcn30_panel_cntl_create,
	.validate_bandwidth = dcn30_validate_bandwidth,
	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
	.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
	.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
	.update_bw_bounding_box = dcn30_update_bw_bounding_box,
	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
};

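/*
 * Construct the DCN 3.0 resource pool: hardcoded caps, clock sources, DCCG,
 * IRQ service, HUBBUB, per-pipe HUBP/DPP/OPP/TG/ABM instances, MPC, DSCs,
 * DWB/MMHUBBUB, AUX and I2C engines, audio/stream encoders and the HW
 * sequencer.  Any failure unwinds through dcn30_resource_destruct().
 */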
static bool dcn30_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn30_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;

	ctx->dc_bios->regs = &bios_regs;

	pool->base.res_cap = &res_cap_dcn3;

	pool->base.funcs = &dcn30_res_pool_funcs;

	/*************************************************
	 *  Resource + asic cap hardcoding               *
	 *************************************************/
	pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
	pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
	pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
	dc->caps.max_downscale_ratio = 600;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 256;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.extended_aux_timeout_support = true;
	dc->caps.dmcub_support = true;

	/* Color pipeline capabilities */
	dc->caps.color.dpp.dcn_arch = 1;
	dc->caps.color.dpp.input_lut_shared = 0;
	dc->caps.color.dpp.icsc = 1;
	dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr
	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1;
	dc->caps.color.dpp.dgam_rom_caps.pq = 1;
	dc->caps.color.dpp.dgam_rom_caps.hlg = 1;
	dc->caps.color.dpp.post_csc = 1;
	dc->caps.color.dpp.gamma_corr = 1;

	dc->caps.color.dpp.hw_3d_lut = 1;
	dc->caps.color.dpp.ogam_ram = 1;
	// no OGAM ROM on DCN3
	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
	dc->caps.color.dpp.ocsc = 0;

	dc->caps.color.mpc.gamut_remap = 1;
	dc->caps.color.mpc.num_3dluts = pool->base.res_cap->num_mpc_3dlut; //3
	dc->caps.color.mpc.ogam_ram = 1;
	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
	dc->caps.color.mpc.ocsc = 1;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;
	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
		dc->debug = debug_defaults_diags;
	} else
		dc->debug = debug_defaults_diags;
	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	/* Clock Sources for Pixel Clock */
	pool->base.clock_sources[DCN30_CLK_SRC_PLL0] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL1] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL2] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL3] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL4] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);
	pool->base.clock_sources[DCN30_CLK_SRC_PLL5] =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL5,
				&clk_src_regs[5], false);

	pool->base.clk_src_count = DCN30_CLK_SRC_TOTAL;

	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn30_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	/* DCCG */
	pool->base.dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* PP Lib and SMU interfaces */
	init_soc_bounding_box(dc, pool);

	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);

	/* IRQ */
	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn30_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* HUBBUB */
	pool->base.hubbub = dcn30_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

	/* HUBPs, DPPs, OPPs and TGs */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn30_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create hubps!\n");
			goto create_fail;
		}

		pool->base.dpps[i] = dcn30_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create dpps!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn30_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] = dcn30_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}
	pool->base.timing_generator_count = i;

	/* ABM */
	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.multiple_abms[i] = dmub_abm_create(ctx,
				&abm_regs[i],
				&abm_shift,
				&abm_mask);
		if (pool->base.multiple_abms[i] == NULL) {
			dm_error("DC: failed to create abm for pipe %d!\n", i);
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}
	/* MPC and DSC */
	pool->base.mpc = dcn30_mpc_create(ctx, pool->base.mpcc_count, pool->base.res_cap->num_mpc_3dlut);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn30_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}

	/* DWB and MMHUBBUB */
	if (!dcn30_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}

	if (!dcn30_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	/* AUX and I2C */
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn30_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn30_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	/* Audio, Stream Encoders including DIG and virtual, MPC 3D LUTs */
	if (!resource_construct(num_virtual_links, dc, &pool->base,
			(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
			&res_create_funcs : &res_create_maximus_funcs)))
		goto create_fail;

	/* HW Sequencer and Plane caps */
	dcn30_hw_sequencer_construct(dc);

	dc->caps.max_planes = pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->cap_funcs = cap_funcs;

	return true;

create_fail:

	dcn30_resource_destruct(pool);

	return false;
}

struct resource_pool *dcn30_create_resource_pool(
		const struct dc_init_data *init_data,
		struct dc *dc)
{
	struct dcn30_resource_pool *pool =
		kzalloc(sizeof(struct dcn30_resource_pool), GFP_KERNEL);

	if (!pool)
		return NULL;

	if (dcn30_resource_construct(init_data->num_virtual_links, dc, pool))
		return &pool->base;

	BREAK_TO_DEBUGGER();
	kfree(pool);
	return NULL;
}