// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include "dcn32_fpu.h"
#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "display_mode_vba_util_32.h"
#include "dml/dcn32/display_mode_vba_32.h"
// We need this include for the WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"

#define DC_LOGGER_INIT(logger)

struct _vcs_dpi_ip_params_st dcn3_2_ip = {
	.gpuvm_enable = 0,
	.gpuvm_max_page_table_levels = 4,
	.hostvm_enable = 0,
	.rob_buffer_size_kbytes = 128,
	.det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE,
	.config_return_buffer_size_in_kbytes = 1280,
	.compressed_buffer_segment_size_in_kbytes = 64,
	.meta_fifo_size_in_kentries = 22,
	.zero_size_buffer_entries = 512,
	.compbuf_reserved_space_64b = 256,
	.compbuf_reserved_space_zs = 64,
	.dpp_output_buffer_pixels = 2560,
	.opp_output_buffer_lines = 1,
	.pixel_chunk_size_kbytes = 8,
	.alpha_pixel_chunk_size_kbytes = 4,
	.min_pixel_chunk_size_bytes = 1024,
	.dcc_meta_buffer_size_bytes = 6272,
	.meta_chunk_size_kbytes = 2,
	.min_meta_chunk_size_bytes = 256,
	.writeback_chunk_size_kbytes = 8,
	.ptoi_supported = false,
	.num_dsc = 4,
	.maximum_dsc_bits_per_component = 12,
	.maximum_pixels_per_line_per_dsc_unit = 6016,
	.dsc422_native_support = true,
	.is_line_buffer_bpp_fixed = true,
	.line_buffer_fixed_bpp = 57,
	.line_buffer_size_bits = 1171920,
	.max_line_buffer_lines = 32,
	.writeback_interface_buffer_size_kbytes = 90,
	.max_num_dpp = 4,
	.max_num_otg = 4,
	.max_num_hdmi_frl_outputs = 1,
	.max_num_wb = 1,
	.max_dchub_pscl_bw_pix_per_clk = 4,
	.max_pscl_lb_bw_pix_per_clk = 2,
	.max_lb_vscl_bw_pix_per_clk = 4,
	.max_vscl_hscl_bw_pix_per_clk = 4,
	.max_hscl_ratio = 6,
	.max_vscl_ratio = 6,
	.max_hscl_taps = 8,
	.max_vscl_taps = 8,
	.dpte_buffer_size_in_pte_reqs_luma = 64,
	.dpte_buffer_size_in_pte_reqs_chroma = 34,
	.dispclk_ramp_margin_percent = 1,
	.max_inter_dcn_tile_repeaters = 8,
	.cursor_buffer_size = 16,
	.cursor_chunk_size = 2,
	.writeback_line_buffer_buffer_size = 0,
	.writeback_min_hscl_ratio = 1,
	.writeback_min_vscl_ratio = 1,
	.writeback_max_hscl_ratio = 1,
	.writeback_max_vscl_ratio = 1,
	.writeback_max_hscl_taps = 1,
	.writeback_max_vscl_taps = 1,
	.dppclk_delay_subtotal = 47,
	.dppclk_delay_scl = 50,
	.dppclk_delay_scl_lb_only = 16,
	.dppclk_delay_cnvc_formatter = 28,
	.dppclk_delay_cnvc_cursor = 6,
	.dispclk_delay_subtotal = 125,
	.dynamic_metadata_vm_enabled = false,
	.odm_combine_4to1_supported = false,
	.dcc_supported = true,
	.max_num_dp2p0_outputs = 2,
	.max_num_dp2p0_streams = 4,
};

struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
	.clock_limits = {
		{
			.state = 0,
			.dcfclk_mhz = 1564.0,
			.fabricclk_mhz = 2500.0,
			.dispclk_mhz = 2150.0,
			.dppclk_mhz = 2150.0,
			.phyclk_mhz = 810.0,
			.phyclk_d18_mhz = 667.0,
			.phyclk_d32_mhz = 625.0,
			.socclk_mhz = 1200.0,
			.dscclk_mhz = 716.667,
			.dram_speed_mts = 18000.0,
			.dtbclk_mhz = 1564.0,
		},
	},
	.num_states = 1,
	.sr_exit_time_us = 42.97,
	.sr_enter_plus_exit_time_us = 49.94,
	.sr_exit_z8_time_us = 285.0,
	.sr_enter_plus_exit_z8_time_us = 320,
	.writeback_latency_us = 12.0,
	.round_trip_ping_latency_dcfclk_cycles = 263,
	.urgent_latency_pixel_data_only_us = 4.0,
	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
	.urgent_latency_vm_data_only_us = 4.0,
	.fclk_change_latency_us = 25,
	.usr_retraining_latency_us = 2,
	.smn_latency_us = 2,
	.mall_allocated_for_dcn_mbytes = 64,
	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
	.pct_ideal_sdp_bw_after_urgent = 100.0,
	.pct_ideal_fabric_bw_after_urgent = 67.0,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0, // N/A, for now keep as is until DML implemented
	.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
	.max_avg_sdp_bw_use_normal_percent = 80.0,
	.max_avg_fabric_bw_use_normal_percent = 60.0,
	.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
	.max_avg_dram_bw_use_normal_percent = 15.0,
	.num_chans = 24,
	.dram_channel_width_bytes = 2,
	.fabric_datapath_to_dcn_data_return_bytes = 64,
	.return_bus_width_bytes = 64,
	.downspread_percent = 0.38,
	.dcn_downspread_percent = 0.5,
	.dram_clock_change_latency_us = 400,
	.dispclk_dppclk_vco_speed_mhz = 4300.0,
	.do_urgent_latency_adjustment = true,
	.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
};

void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
{
	/* defaults */
	double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
	double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us;
	double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
	double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
	/* For the min clocks, use the values reported by PM FW and report those as min */
	uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
	uint16_t min_dcfclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
	uint16_t setb_min_uclk_mhz = min_uclk_mhz;
	uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;

	dc_assert_fp_enabled();
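
	/* Four watermark sets are built below:
	 *   Set A - normal operation, default DML latencies, min clocks from DPM0
	 *   Set B - performance, min clocks taken from DPM[2] when available
	 *   Set C - dummy p-state, pstate latency replaced with a per-UCLK dummy
	 *           value (Set C can be disabled via bb_overrides)
	 *   Set D - MALL (SubVP), SR times halved as a bring-up placeholder
	 */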
	/* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
	if (dcfclk_mhz_for_the_second_state)
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
	else
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;

	if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz)
		setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz;

	/* Set A - Normal - default values */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;

	/* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
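
	/* The dummy p-state table below pairs each UCLK DPM level with a dummy
	 * latency. dram_speed_mts is derived from memclk as MT/s = MHz * 16
	 * (e.g. a hypothetical DPM0 memclk of 1125 MHz gives 18000 MT/s, which
	 * matches dram_speed_mts in the dcn3_2_soc state-0 entry above).
	 */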
	/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
	/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
	if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
		clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
		clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
		clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
		clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
		clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
		clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
		clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16;
		clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
	}
	/* Set D - MALL - SR enter and exit times specific to MALL, TBD after bringup or later phase; for now use DRAM values / 2 */
	/* For MALL, DRAM clock change latency is N/A; for watermark calculations use the lowest-value dummy P-state latency */
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}

/*
 * Finds dummy_latency_index when MCLK switching using firmware based
 * vblank stretch is enabled.
 * This function will iterate through the table of dummy pstate
 * latencies until the lowest value that allows
 * dm_allow_self_refresh_and_mclk_switch to happen is found.
 */
int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
							    struct dc_state *context,
							    display_e2e_pipe_params_st *pipes,
							    int pipe_cnt,
							    int vlevel)
{
	const int max_latency_table_entries = 4;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	int dummy_latency_index = 0;
	enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];

	dc_assert_fp_enabled();

	while (dummy_latency_index < max_latency_table_entries) {
		if (temp_clock_change_support != dm_dram_clock_change_unsupported)
			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);

		/* for subvp + DRR case, if subvp pipes are still present we support pstate */
		if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
		    dcn32_subvp_in_use(dc, context))
			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;

		if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
		    vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
			break;

		dummy_latency_index++;
	}

	if (dummy_latency_index == max_latency_table_entries) {
		ASSERT(dummy_latency_index != max_latency_table_entries);
		/* If the execution gets here, it means dummy p_states are
		 * not possible. This should never happen and would mean
		 * something is severely wrong.
		 * Here we reset dummy_latency_index to 3, because it is
		 * better to have underflows than system crashes.
		 */
		dummy_latency_index = max_latency_table_entries - 1;
	}

	return dummy_latency_index;
}

/**
 * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
 * and populate pipe_ctx with those params.
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @pipes: [in] DML pipe params array
 * @pipe_cnt: [in] DML pipe count
 *
 * This function must be called AFTER the phantom pipes are added to context
 * and run through DML (so that the DLG params for the phantom pipes can be
 * populated), and BEFORE we program the timing for the phantom pipes.
 */
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
					      struct dc_state *context,
					      display_e2e_pipe_params_st *pipes,
					      int pipe_cnt)
{
	uint32_t i, pipe_idx;

	dc_assert_fp_enabled();

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			pipes[pipe_idx].pipe.dest.vstartup_start =
				get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
			pipes[pipe_idx].pipe.dest.vupdate_offset =
				get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
			pipes[pipe_idx].pipe.dest.vupdate_width =
				get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
			pipes[pipe_idx].pipe.dest.vready_offset =
				get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
			pipe->pipe_dlg_param = pipes[pipe_idx].pipe.dest;
		}
		pipe_idx++;
	}
}

/**
 * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
 * @context: [in] New DC state to be programmed
 * @pipe_e2e: [in] DML pipe end to end context
 *
 * This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is
 * required (both ODM and MPC). For pipe split, ODM combine is determined by
 * the ODM mode, and MPC combine is determined by DPPClk requirements.
 *
 * This function follows the same policy as DML:
 * - Check for ODM combine requirements / policy first
 * - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
 *   MPC is required
 *
 * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
 */
uint8_t dcn32_predict_pipe_split(struct dc_state *context,
				 display_e2e_pipe_params_st *pipe_e2e)
{
	double pscl_throughput;
	double pscl_throughput_chroma;
	double dpp_clk_single_dpp, clock;
	double clk_frequency = 0.0;
	double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
	bool total_available_pipes_support = false;
	uint32_t number_of_dpp = 0;
	enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
	double req_dispclk_per_surface = 0;
	uint8_t num_splits = 0;

	dc_assert_fp_enabled();

	dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
			pipe_e2e->pipe.dest.hactive,
			pipe_e2e->dout.output_format,
			pipe_e2e->dout.output_type,
			pipe_e2e->pipe.dest.odm_combine_policy,
			context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
			context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
			pipe_e2e->dout.dsc_enable != 0,
			0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
			context->bw_ctx.dml.ip.max_num_dpp,
			pipe_e2e->pipe.dest.pixel_rate_mhz,
			context->bw_ctx.dml.soc.dcn_downspread_percent,
			context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
			context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
			pipe_e2e->dout.dsc_slices,
			/* Output */
			&total_available_pipes_support,
			&number_of_dpp,
			&odm_mode,
			&req_dispclk_per_surface);

	dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
			pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
			pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
			pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
			context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
			context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
			pipe_e2e->pipe.dest.pixel_rate_mhz,
			pipe_e2e->pipe.src.source_format,
			pipe_e2e->pipe.scale_taps.htaps,
			pipe_e2e->pipe.scale_taps.htaps_c,
			pipe_e2e->pipe.scale_taps.vtaps,
			pipe_e2e->pipe.scale_taps.vtaps_c,
			/* Output */
			&pscl_throughput, &pscl_throughput_chroma,
			&dpp_clk_single_dpp);

	clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);

	if (clock > 0)
		clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);

	if (odm_mode == dm_odm_combine_mode_2to1)
		num_splits = 1;
	else if (odm_mode == dm_odm_combine_mode_4to1)
		num_splits = 3;
	else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
		num_splits = 1;

	return num_splits;
}
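
/* calculate_net_bw_in_kbytes_sec() below takes the minimum of the DRAM,
 * fabric and SDP bandwidth implied by one clock-limits entry. As a worked
 * example using the dcn3_2_soc state-0 values above (units follow the
 * function's naming):
 *   memory: 18000 MT/s * 24 chans * 2 B * 0.20 = 172800
 *   fabric:  2500 MHz * 64 B * 0.67            = 107200
 *   sdp:     1564 MHz * 64 B * 1.00            = 100096
 * so that entry would be SDP-limited at 100096.
 */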
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
{
	float memory_bw_kbytes_sec;
	float fabric_bw_kbytes_sec;
	float sdp_bw_kbytes_sec;
	float limiting_bw_kbytes_sec;

	memory_bw_kbytes_sec = entry->dram_speed_mts *
			dcn3_2_soc.num_chans *
			dcn3_2_soc.dram_channel_width_bytes *
			((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);

	fabric_bw_kbytes_sec = entry->fabricclk_mhz *
			dcn3_2_soc.return_bus_width_bytes *
			((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);

	sdp_bw_kbytes_sec = entry->dcfclk_mhz *
			dcn3_2_soc.return_bus_width_bytes *
			((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);

	limiting_bw_kbytes_sec = memory_bw_kbytes_sec;

	if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec)
		limiting_bw_kbytes_sec = fabric_bw_kbytes_sec;

	if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec)
		limiting_bw_kbytes_sec = sdp_bw_kbytes_sec;

	return limiting_bw_kbytes_sec;
}

static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
{
	if (entry->dcfclk_mhz > 0) {
		float bw_on_sdp = entry->dcfclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);

		entry->fabricclk_mhz = bw_on_sdp / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
		entry->dram_speed_mts = bw_on_sdp / (dcn3_2_soc.num_chans *
				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
	} else if (entry->fabricclk_mhz > 0) {
		float bw_on_fabric = entry->fabricclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);

		entry->dcfclk_mhz = bw_on_fabric / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
		entry->dram_speed_mts = bw_on_fabric / (dcn3_2_soc.num_chans *
				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
	} else if (entry->dram_speed_mts > 0) {
		float bw_on_dram = entry->dram_speed_mts * dcn3_2_soc.num_chans *
				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);

		entry->fabricclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
		entry->dcfclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
	}
}

void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
				    unsigned int *num_entries,
				    struct _vcs_dpi_voltage_scaling_st *entry)
{
	int i = 0;
	int index = 0;
	float net_bw_of_new_state = 0;

	dc_assert_fp_enabled();

	get_optimal_ntuple(entry);

	if (*num_entries == 0) {
		table[0] = *entry;
		(*num_entries)++;
	} else {
		net_bw_of_new_state = calculate_net_bw_in_kbytes_sec(entry);
		while (net_bw_of_new_state > calculate_net_bw_in_kbytes_sec(&table[index])) {
			index++;
			if (index >= *num_entries)
				break;
		}

		for (i = *num_entries; i > index; i--)
			table[i] = table[i - 1];

		table[index] = *entry;
		(*num_entries)++;
	}
}

/**
 * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
 * @dc: current dc state
 * @context: new dc state
 * @ref_pipe: Main pipe for the phantom stream
 * @phantom_stream: target phantom stream state
 * @pipes: DML pipe params
 * @pipe_cnt: number of DML pipes
 * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
 *
 * Set timing params of the phantom stream based on calculated output from DML.
 * This function first gets the DML pipe index using the DC pipe index, then
 * calls into DML (get_subviewport_lines_needed_in_mall) to get the number of
 * lines required for SubVP MCLK switching and assigns to the phantom stream
 * accordingly.
 *
 * - The number of SubVP lines calculated in DML does not take into account
 *   FW processing delays and required pstate allow width, so we must include
 *   that separately.
 *
 * - Set phantom backporch = vstartup of main pipe
 */
void dcn32_set_phantom_stream_timing(struct dc *dc,
				     struct dc_state *context,
				     struct pipe_ctx *ref_pipe,
				     struct dc_stream_state *phantom_stream,
				     display_e2e_pipe_params_st *pipes,
				     unsigned int pipe_cnt,
				     unsigned int dc_pipe_idx)
{
	unsigned int i, pipe_idx;
	struct pipe_ctx *pipe;
	uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
	unsigned int num_dpp;
	unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
	unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
	unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	struct dc_stream_state *main_stream = ref_pipe->stream;

	dc_assert_fp_enabled();

	// Find DML pipe index (pipe_idx) using dc_pipe_idx
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (i == dc_pipe_idx)
			break;

		pipe_idx++;
	}

	// Calculate lines required for pstate allow width and FW processing delays
	pstate_width_fw_delay_lines = ((double)(dc->caps.subvp_fw_processing_delay_us +
			dc->caps.subvp_pstate_allow_width_us) / 1000000) *
			(ref_pipe->stream->timing.pix_clk_100hz * 100) /
			(double)ref_pipe->stream->timing.h_total;
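
	/* Worked example (hypothetical numbers): with a combined FW delay plus
	 * allow width of 30 us, a 600 MHz pixel clock and h_total = 4400, the
	 * margin is 30e-6 * 600e6 / 4400 ~= 4 extra lines of phantom vactive.
	 */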
	// Update clks_cfg for calling into recalculate
	pipes[0].clks_cfg.voltage = vlevel;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
	pipes[0].clks_cfg.socclk_mhz = socclk;

	// DML calculation for MALL region doesn't take into account FW delay
	// and required pstate allow width for multi-display cases
	/* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
	 * to 2 swaths (i.e. 16 lines)
	 */
	phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
			pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;

	// W/A for DCC corruption with certain high resolution timings.
	// Determine if pipe split is used. If so, add meta_row_height to the phantom vactive.
	num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
	phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;

	/* dc->debug.subvp_extra_lines 0 by default */
	phantom_vactive += dc->debug.subvp_extra_lines;

	// For backporch of phantom pipe, use vstartup of the main pipe
	phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);

	phantom_stream->dst.y = 0;
	phantom_stream->dst.height = phantom_vactive;
	/* When scaling, DML provides the end to end required number of lines for MALL.
	 * dst.height is always correct for this case, but src.height is not which causes a
	 * delta between main and phantom pipe scaling outputs. Need to adjust src.height on
	 * phantom for this case.
	 */
	phantom_stream->src.y = 0;
	phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;

	phantom_stream->timing.v_addressable = phantom_vactive;
	phantom_stream->timing.v_front_porch = 1;
	phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
						phantom_stream->timing.v_front_porch +
						phantom_stream->timing.v_sync_width +
						phantom_bp;
	phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
}

/**
 * dcn32_get_num_free_pipes - Calculate number of free pipes
 * @dc: current dc state
 * @context: new dc state
 *
 * This function assumes that a "used" pipe is a pipe that has
 * both a stream and a plane assigned to it.
 *
 * Return: Number of free pipes available in the context
 */
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
	unsigned int i;
	unsigned int free_pipes = 0;
	unsigned int num_pipes = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && !pipe->top_pipe) {
			while (pipe) {
				num_pipes++;
				pipe = pipe->bottom_pipe;
			}
		}
	}

	free_pipes = dc->res_pool->pipe_count - num_pipes;
	return free_pipes;
}

/**
 * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
 * @dc: current dc state
 * @context: new dc state
 * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
 *
 * We enter this function if we are Sub-VP capable (i.e. enough pipes available)
 * and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
 * we are forcing SubVP P-State switching on the current config.
 *
 * The number of pipes used for the chosen surface must be less than or equal to the
 * number of free pipes available.
 *
 * In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK).
 * For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own
 * for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
 * support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
 *
 * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
 */
static bool dcn32_assign_subvp_pipe(struct dc *dc,
				    struct dc_state *context,
				    unsigned int *index)
{
	unsigned int i, pipe_idx;
	unsigned int max_frame_time = 0;
	bool valid_assignment_found = false;
	unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
	bool current_assignment_freesync = false;
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		unsigned int num_pipes = 0;
		unsigned int refresh_rate = 0;

		if (!pipe->stream)
			continue;

		// Round up
		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
				pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
				/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
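		/* Worked example of the ceiling division: a hypothetical 594 MHz
		 * pixel clock with h_total = 4400 and v_total = 2250 is exactly
		 * 60 Hz and stays 60, while anything fractionally above an
		 * integer rate rounds up; a nominally 119.99 Hz mode therefore
		 * computes as 120 and fails the refresh_rate < 120 check below.
		 */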
		/* SubVP pipe candidate requirements:
		 * - Refresh rate < 120hz
		 * - Not able to switch in vactive naturally (switching in active means the
		 *   DET provides enough buffer to hide the P-State switch latency -- trying
		 *   to combine this with SubVP can cause issues with the scheduling).
		 * - Not TMZ surface
		 */
		if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
		    pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
		    (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
		    (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
		    dcn32_allow_subvp_with_active_margin(pipe)))) {
			while (pipe) {
				num_pipes++;
				pipe = pipe->bottom_pipe;
			}

			pipe = &context->res_ctx.pipe_ctx[i];
			if (num_pipes <= free_pipes) {
				struct dc_stream_state *stream = pipe->stream;
				unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
						(double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
				if (frame_us > max_frame_time && !stream->ignore_msa_timing_param) {
					*index = i;
					max_frame_time = frame_us;
					valid_assignment_found = true;
					current_assignment_freesync = false;
				/* For the 2-Freesync display case, still choose the one with the
				 * longest frame time
				 */
				} else if (stream->ignore_msa_timing_param && (!valid_assignment_found ||
						(current_assignment_freesync && frame_us > max_frame_time))) {
					*index = i;
					valid_assignment_found = true;
					current_assignment_freesync = true;
				}
			}
		}
		pipe_idx++;
	}
	return valid_assignment_found;
}

/**
 * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
 * @dc: current dc state
 * @context: new dc state
 *
 * This function returns true if there are enough free pipes
 * to create the required phantom pipes for any given stream
 * (that does not already have phantom pipe assigned).
 *
 * e.g. For a 2 stream config where the first stream uses one
 * pipe and the second stream uses 2 pipes (i.e. pipe split),
 * this function will return true because there is 1 remaining
 * pipe which can be used as the phantom pipe for the non pipe
 * split pipe.
 *
 * Return:
 * True if there are enough free pipes to assign phantom pipes to at least one
 * stream that does not already have phantom pipes assigned. Otherwise false.
 */
static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context)
{
	unsigned int i, split_cnt, free_pipes;
	unsigned int min_pipe_split = dc->res_pool->pipe_count + 1; // init as max number of pipes + 1
	bool subvp_possible = false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		// Find the minimum pipe split count for non SubVP pipes
		if (pipe->stream && !pipe->top_pipe &&
		    pipe->stream->mall_stream_config.type == SUBVP_NONE) {
			split_cnt = 0;
			while (pipe) {
				split_cnt++;
				pipe = pipe->bottom_pipe;
			}

			if (split_cnt < min_pipe_split)
				min_pipe_split = split_cnt;
		}
	}

	free_pipes = dcn32_get_num_free_pipes(dc, context);

	// SubVP only possible if at least one pipe is being used (i.e. free_pipes
	// should not equal the pipe_count)
	if (free_pipes >= min_pipe_split && free_pipes < dc->res_pool->pipe_count)
		subvp_possible = true;

	return subvp_possible;
}

/**
 * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
 * @dc: current dc state
 * @context: new dc state
 *
 * High level algorithm:
 * 1. Find longest microschedule length (in us) between the two SubVP pipes
 * 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both
 *    pipes still allows for the maximum microschedule to fit in the active
 *    region for both pipes.
 *
 * Return: True if the SubVP + SubVP config is schedulable, false otherwise
 */
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
	struct pipe_ctx *subvp_pipes[2];
	struct dc_stream_state *phantom = NULL;
	uint32_t microschedule_lines = 0;
	uint32_t index = 0;
	uint32_t i;
	uint32_t max_microschedule_us = 0;
	int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		uint32_t time_us = 0;

		/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
		 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
		 */
		if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
		    pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
			phantom = pipe->stream->mall_stream_config.paired_stream;
			microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
					phantom->timing.v_addressable;

			// Round up when calculating microschedule time (+ 1 at the end)
			time_us = (microschedule_lines * phantom->timing.h_total) /
					(double)(phantom->timing.pix_clk_100hz * 100) * 1000000 +
					dc->caps.subvp_prefetch_end_to_mall_start_us +
					dc->caps.subvp_fw_processing_delay_us + 1;
			if (time_us > max_microschedule_us)
				max_microschedule_us = time_us;

			subvp_pipes[index] = pipe;
			index++;

			// Maximum 2 SubVP pipes
			if (index == 2)
				break;
		}
	}
	vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
			(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
	vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
			(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
	vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) *
			subvp_pipes[0]->stream->timing.h_total) /
			(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
	vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) *
			subvp_pipes[1]->stream->timing.h_total) /
			(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
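
	/* Worked example (hypothetical timings): with ~15.4 ms of VACTIVE and
	 * ~1.2 ms of VBLANK on both displays, (15400 - 1200) / 2 = 7100 us of
	 * margin must exceed the longest microschedule for the worst-case
	 * overlap (one display's VBLANK landing mid-VACTIVE of the other).
	 */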
	if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
	    (vactive2_us - vblank1_us) / 2 > max_microschedule_us)
		return true;

	return false;
}

/**
 * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
 * @dc: current dc state
 * @context: new dc state
 * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Determine the frame time for the DRR display when adding required margin for MCLK switching
 *    (the margin is equal to the MALL region + DRR margin (500us))
 * 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
 *    then report the configuration as supported
 *
 * Return: True if the SubVP + DRR config is schedulable, false otherwise
 */
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
	bool schedulable = false;
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dc_crtc_timing *main_timing = NULL;
	struct dc_crtc_timing *phantom_timing = NULL;
	struct dc_crtc_timing *drr_timing = NULL;
	int16_t prefetch_us = 0;
	int16_t mall_region_us = 0;
	int16_t drr_frame_us = 0;	// nominal frame time
	int16_t subvp_active_us = 0;
	int16_t stretched_drr_us = 0;
	int16_t drr_stretched_vblank_us = 0;
	int16_t max_vblank_mallregion = 0;
	const struct dc_config *config = &dc->config;

	if (config->disable_subvp_drr)
		return false;

	// Find SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
			continue;

		// Find the SubVP pipe
		if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
			break;
	}

	main_timing = &pipe->stream->timing;
	phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
	drr_timing = &drr_pipe->stream->timing;
	prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
			(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
			dc->caps.subvp_prefetch_end_to_mall_start_us;
	subvp_active_us = main_timing->v_addressable * main_timing->h_total /
			(double)(main_timing->pix_clk_100hz * 100) * 1000000;
	drr_frame_us = drr_timing->v_total * drr_timing->h_total /
			(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
	// P-State allow width and FW delays already included in phantom_timing->v_addressable
	mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
			(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
	stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
			(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
	max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
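
	/* Worked example (hypothetical values): a 120 Hz nominal DRR frame of
	 * 8333 us stretched by a 500 us MALL region plus the 500 us DRR margin
	 * gives stretched_drr_us = 9333, i.e. an effective ~107 Hz; both
	 * conditions below must then pass against min_refresh and the SubVP
	 * active margin.
	 */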
	/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
	 * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
	 * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
	 * and the max of (VBLANK blanking time, MALL region)).
	 */
	if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
	    subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
		schedulable = true;

	return schedulable;
}


/**
 * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
 * @dc: current dc state
 * @context: new dc state
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
 * 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time))
 *    then report the configuration as supported
 * 3. If the VBLANK display is DRR, then take the DRR static schedulability path
 *
 * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
 */
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
	struct pipe_ctx *pipe = NULL;
	struct pipe_ctx *subvp_pipe = NULL;
	bool found = false;
	bool schedulable = false;
	uint32_t i = 0;
	uint8_t vblank_index = 0;
	uint16_t prefetch_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t vblank_frame_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t vblank_blank_us = 0;
	uint16_t max_vblank_mallregion = 0;
	struct dc_crtc_timing *main_timing = NULL;
	struct dc_crtc_timing *phantom_timing = NULL;
	struct dc_crtc_timing *vblank_timing = NULL;

	/* For SubVP + VBLANK/DRR cases, we assume there can only be
	 * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
	 * is supported, it is either a single VBLANK case or two VBLANK
	 * displays which are synchronized (in which case they have identical
	 * timings).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
			continue;

		if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
			// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
			vblank_index = i;
			found = true;
		}

		if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
			subvp_pipe = pipe;
	}
	// Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On
	if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param &&
	    (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync ||
	     context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) {
		// SUBVP + DRR case -- only allowed if run through DRR validation path
		schedulable = false;
	} else if (found) {
		main_timing = &subvp_pipe->stream->timing;
		phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
		vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
		// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
		// Also include the prefetch end to mallstart delay time
		prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
				dc->caps.subvp_prefetch_end_to_mall_start_us;
		// P-State allow width and FW delays already included in phantom_timing->v_addressable
		mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
		vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total /
				(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
		vblank_blank_us = (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total /
				(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
		subvp_active_us = main_timing->v_addressable * main_timing->h_total /
				(double)(main_timing->pix_clk_100hz * 100) * 1000000;
		max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;

		// Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
		// and the max of (VBLANK blanking time, MALL region)
		// TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
		if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
			schedulable = true;
	}
	return schedulable;
}

/**
 * subvp_validate_static_schedulability - Check which SubVP case is calculated
 * and handle static analysis based on the case.
 * @dc: current dc state
 * @context: new dc state
 * @vlevel: Voltage level calculated by DML
 *
 * Three cases:
 * 1. SubVP + SubVP
 * 2. SubVP + VBLANK (DRR checked internally)
 * 3. SubVP + VACTIVE (currently unsupported)
 *
 * Return: True if statically schedulable, false otherwise
 */
static bool subvp_validate_static_schedulability(struct dc *dc,
						 struct dc_state *context,
						 int vlevel)
{
	bool schedulable = true;	// true by default for single display case
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	uint8_t vactive_count = 0;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (pipe->plane_state && !pipe->top_pipe &&
		    pipe->stream->mall_stream_config.type == SUBVP_MAIN)
			subvp_count++;

		// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
		// switching (SubVP + VACTIVE unsupported). In situations where we force
		// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
		if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
		    pipe->stream->mall_stream_config.type == SUBVP_NONE) {
			vactive_count++;
		}
		pipe_idx++;
	}

	if (subvp_count == 2) {
		// Static schedulability check for SubVP + SubVP case
		schedulable = subvp_subvp_schedulable(dc, context);
	} else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp) {
		// Static schedulability check for SubVP + VBLANK case. Also handle the case where
		// DML outputs SubVP + VBLANK + VACTIVE (DML will report as SubVP + VBLANK)
		if (vactive_count > 0)
			schedulable = false;
		else
			schedulable = subvp_vblank_schedulable(dc, context);
	} else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp &&
		   vactive_count > 0) {
		// For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default.
		// We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count.
		// SubVP + VACTIVE currently unsupported
		schedulable = false;
	}
	return schedulable;
}

static void dcn32_full_validate_bw_helper(struct dc *dc,
					  struct dc_state *context,
					  display_e2e_pipe_params_st *pipes,
					  int *vlevel,
					  int *split,
					  bool *merge,
					  int *pipe_cnt)
{
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	unsigned int dc_pipe_idx = 0;
	int i = 0;
	bool found_supported_config = false;
	struct pipe_ctx *pipe = NULL;
	uint32_t non_subvp_pipes = 0;
	bool drr_pipe_found = false;
	uint32_t drr_pipe_index = 0;

	dc_assert_fp_enabled();

	/*
	 * DML favors voltage over p-state, but we're more interested in
	 * supporting p-state over voltage. We can't support p-state in
	 * prefetch mode > 0 so try capping the prefetch mode to start.
	 * Override present for testing.
	 */
	if (dc->debug.dml_disallow_alternate_prefetch_modes)
		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
			dm_prefetch_support_uclk_fclk_and_stutter;
	else
		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
			dm_prefetch_support_uclk_fclk_and_stutter_if_possible;

	*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
	/* This may adjust vlevel and maxMpcComb */
	if (*vlevel < context->bw_ctx.dml.soc.num_states) {
		*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
		vba->VoltageLevel = *vlevel;
	}

	/* Conditions for setting up phantom pipes for SubVP:
	 * 1. Not force disable SubVP
	 * 2. Full update (i.e. !fast_validate)
	 * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
	 * 4. Display configuration passes validation
	 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
	 */
	if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
	    !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
	    (*vlevel == context->bw_ctx.dml.soc.num_states ||
	    vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
	    dc->debug.force_subvp_mclk_switch)) {

		dcn32_merge_pipes_for_subvp(dc, context);
		memset(merge, 0, MAX_PIPES * sizeof(bool));

		/* to re-initialize viewport after the pipe merge */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (!pipe_ctx->plane_state || !pipe_ctx->stream)
				continue;

			resource_build_scaling_params(pipe_ctx);
		}

		while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
		       dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
			/* For the case where *vlevel = num_states, bandwidth validation has failed for this config.
			 * Adding phantom pipes won't change the validation result, so change the DML input param
			 * for P-State support before adding phantom pipes and recalculating the DML result.
			 * However, this case is only applicable for SubVP + DRR cases because the prefetch mode
			 * will not allow for switch in VBLANK. The DRR display must have its VBLANK stretched
			 * enough to support MCLK switching.
			 */
			if (*vlevel == context->bw_ctx.dml.soc.num_states &&
			    context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
					dm_prefetch_support_uclk_fclk_and_stutter) {
				context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
						dm_prefetch_support_fclk_and_stutter;
				/* There are params (such as FabricClock) that need to be recalculated
				 * after validation fails (otherwise it will be 0). Calculation for
				 * phantom vactive requires call into DML, so we must ensure all the
				 * vba params are valid otherwise we'll get incorrect phantom vactive.
				 */
				*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
			}

			dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);

			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
			// Populate dppclk to trigger a recalculate in dml_get_voltage_level
			// so the phantom pipe DLG params can be assigned correctly.
			pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);

			/* Check that vlevel requested supports pstate or not
			 * if not, select the lowest vlevel that supports it
			 */
			for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
				if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
					*vlevel = i;
					break;
				}
			}

			if (*vlevel < context->bw_ctx.dml.soc.num_states &&
			    vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported
			    && subvp_validate_static_schedulability(dc, context, *vlevel)) {
				found_supported_config = true;
			} else if (*vlevel < context->bw_ctx.dml.soc.num_states) {
				/* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed.
				 * This handles the case for SubVP + DRR, where the DRR display does not support MCLK
				 * switch at its native refresh rate / timing, or DRR is allowed for the non-subvp
				 * display.
				 */
				for (i = 0; i < dc->res_pool->pipe_count; i++) {
					pipe = &context->res_ctx.pipe_ctx[i];
					if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
					    pipe->stream->mall_stream_config.type == SUBVP_NONE) {
						non_subvp_pipes++;
						// Use ignore_msa_timing_param flag to identify as DRR
						if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
							drr_pipe_found = true;
							drr_pipe_index = i;
						}
					}
				}
				// If there is only 1 remaining non SubVP pipe that is DRR, check static
				// schedulability for SubVP + DRR.
				if (non_subvp_pipes == 1 && drr_pipe_found) {
					/* find lowest vlevel that supports the config */
					for (i = *vlevel; i >= 0; i--) {
						if (vba->ModeSupport[i][vba->maxMpcComb]) {
							*vlevel = i;
						} else {
							break;
						}
					}

					found_supported_config = subvp_drr_schedulable(dc, context,
										       &context->res_ctx.pipe_ctx[drr_pipe_index]);
				}
			}
		}

		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
		// remove phantom pipes and repopulate dml pipes
		if (!found_supported_config) {
			dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
			vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);

			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
			/* This may adjust vlevel and maxMpcComb */
			if (*vlevel < context->bw_ctx.dml.soc.num_states) {
				*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
				vba->VoltageLevel = *vlevel;
			}
		} else {
			// Must populate phantom DLG params before programming hardware / timing for phantom pipe
			dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);

			/* Call validate_apply_pipe_split flags after calling DML getters for
			 * phantom dlg params, or some of the VBA params indicating pipe split
			 * can be overwritten by the getters.
			 *
			 * When setting up SubVP config, all pipes are merged before attempting to
			 * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
			 * and phantom pipes will be split in the regular pipe splitting sequence.
			 */
			memset(split, 0, MAX_PIPES * sizeof(int));
			memset(merge, 0, MAX_PIPES * sizeof(bool));
			*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
			vba->VoltageLevel = *vlevel;
			// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
			// until driver has acquired the DMCUB lock to do it safely.
		}
	}
}

static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;
		if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
			return true;
	}
	return false;
}

static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{
	struct dc_crtc_timing patched_crtc_timing;
	uint32_t asic_blank_end = 0;
	uint32_t asic_blank_start = 0;
	uint32_t newVstartup = 0;

	patched_crtc_timing = *dc_crtc_timing;

	if (patched_crtc_timing.flags.INTERLACE == 1) {
		if (patched_crtc_timing.v_front_porch < 2)
			patched_crtc_timing.v_front_porch = 2;
	} else {
		if (patched_crtc_timing.v_front_porch < 1)
			patched_crtc_timing.v_front_porch = 1;
	}

	/* blank_start = frame end - front porch */
	asic_blank_start = patched_crtc_timing.v_total -
			patched_crtc_timing.v_front_porch;

	/* blank_end = blank_start - active */
	asic_blank_end = asic_blank_start -
			patched_crtc_timing.v_border_bottom -
			patched_crtc_timing.v_addressable -
			patched_crtc_timing.v_border_top;

	newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);

	*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}

static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
				       display_e2e_pipe_params_st *pipes,
				       int pipe_cnt, int vlevel)
{
	int i, pipe_idx, active_hubp_count = 0;
	bool usr_retraining_support = false;
	bool unbounded_req_enabled = false;

	dc_assert_fp_enabled();

	/* Writeback MCIF_WB arbitration parameters */
	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);

	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
	context->bw_ctx.bw.dcn.clk.p_state_change_support =
		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
			!= dm_dram_clock_change_unsupported;
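
	/* DRAMSpeed from DML is in MT/s; the /16 above undoes the MT/s = MHz * 16
	 * memclk scaling used when building the clock table (e.g. 18000 MT/s maps
	 * back to 1125000 kHz of dramclk).
	 */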
1336 */ 1337 context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; 1338 1339 context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; 1340 context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); 1341 context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000; 1342 if (context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_fclock_change_unsupported) 1343 context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false; 1344 else 1345 context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true; 1346 1347 usr_retraining_support = context->bw_ctx.dml.vba.USRRetrainingSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; 1348 ASSERT(usr_retraining_support); 1349 1350 if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) 1351 context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; 1352 1353 unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt); 1354 1355 if (unbounded_req_enabled && pipe_cnt > 1) { 1356 // Unbounded requesting should not ever be used when more than 1 pipe is enabled. 1357 ASSERT(false); 1358 unbounded_req_enabled = false; 1359 } 1360 1361 context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; 1362 context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; 1363 context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; 1364 1365 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 1366 if (!context->res_ctx.pipe_ctx[i].stream) 1367 continue; 1368 if (context->res_ctx.pipe_ctx[i].plane_state) 1369 active_hubp_count++; 1370 pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, 1371 pipe_idx); 1372 pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, 1373 pipe_idx); 1374 pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, 1375 pipe_idx); 1376 pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, 1377 pipe_idx); 1378 1379 if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { 1380 // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests 1381 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; 1382 context->res_ctx.pipe_ctx[i].unbounded_req = false; 1383 } else { 1384 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, 1385 pipe_idx); 1386 context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled; 1387 } 1388 1389 if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) 1390 context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; 1391 if (context->res_ctx.pipe_ctx[i].plane_state) 1392 context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; 1393 else 1394 context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0; 1395 context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; 1396 1397 context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 1398 1399 /* MALL Allocation Sizes */ 1400 /* count from active, top pipes per plane only */ 1401 if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state && 1402 (context->res_ctx.pipe_ctx[i].top_pipe == NULL || 1403 
context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
1404 context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
1405 /* SS: all active surfaces stored in MALL */
1406 if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
1407 context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1408
1409 if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
1410 /* SS PSR On: all active surfaces that are part of streams not supporting PSR are stored in MALL */
1411 context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1412 }
1413 } else {
1414 /* SUBVP: phantom surfaces only stored in MALL */
1415 context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1416 }
1417 }
1418
1419 if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
1420 dcn20_adjust_freesync_v_startup(
1421 &context->res_ctx.pipe_ctx[i].stream->timing,
1422 &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
1423
1424 pipe_idx++;
1425 }
1426 /* If DCN isn't making memory requests we can allow pstate change and lower clocks */
1427 if (!active_hubp_count) {
1428 context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
1429 context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1430 context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
1431 context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
1432 context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
1433 context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
1434 context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1435 }
1436 /* save an original dppclk copy */
1437 context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
1438 context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
1439 context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz
1440 * 1000;
1441 context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
1442 * 1000;
1443
1444 context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
1445
1446 context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
1447
1448 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1449 if (context->res_ctx.pipe_ctx[i].stream)
1450 context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
1451 }
1452
1453 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1454
1455 if (!context->res_ctx.pipe_ctx[i].stream)
1456 continue;
1457
1458 context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml,
1459 &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes,
1460 pipe_cnt, pipe_idx);
1461
1462 context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg_v2(&context->res_ctx.pipe_ctx[i].rq_regs,
1463 &context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1464 pipe_idx++;
1465 }
1466 }
1467
1468 static struct pipe_ctx *dcn32_find_split_pipe(
1469 struct dc *dc,
1470 struct dc_state *context,
1471 int old_index)
1472 {
1473 struct pipe_ctx *pipe = NULL;
1474 int i;
1475
1476 if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
1477 pipe = &context->res_ctx.pipe_ctx[old_index];
1478 pipe->pipe_idx = old_index;
1479 }
1480
1481 if (!pipe)
1482 for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1483 if
(dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL 1484 && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { 1485 if (context->res_ctx.pipe_ctx[i].stream == NULL) { 1486 pipe = &context->res_ctx.pipe_ctx[i]; 1487 pipe->pipe_idx = i; 1488 break; 1489 } 1490 } 1491 } 1492 1493 /* 1494 * May need to fix pipes getting tossed from 1 opp to another on flip 1495 * Add for debugging transient underflow during topology updates: 1496 * ASSERT(pipe); 1497 */ 1498 if (!pipe) 1499 for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { 1500 if (context->res_ctx.pipe_ctx[i].stream == NULL) { 1501 pipe = &context->res_ctx.pipe_ctx[i]; 1502 pipe->pipe_idx = i; 1503 break; 1504 } 1505 } 1506 1507 return pipe; 1508 } 1509 1510 static bool dcn32_split_stream_for_mpc_or_odm( 1511 const struct dc *dc, 1512 struct resource_context *res_ctx, 1513 struct pipe_ctx *pri_pipe, 1514 struct pipe_ctx *sec_pipe, 1515 bool odm) 1516 { 1517 int pipe_idx = sec_pipe->pipe_idx; 1518 const struct resource_pool *pool = dc->res_pool; 1519 1520 DC_LOGGER_INIT(dc->ctx->logger); 1521 1522 if (odm && pri_pipe->plane_state) { 1523 /* ODM + window MPO, where MPO window is on left half only */ 1524 if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <= 1525 pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) { 1526 1527 DC_LOG_SCALER("%s - ODM + window MPO(left). pri_pipe:%d\n", 1528 __func__, 1529 pri_pipe->pipe_idx); 1530 return true; 1531 } 1532 1533 /* ODM + window MPO, where MPO window is on right half only */ 1534 if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) { 1535 1536 DC_LOG_SCALER("%s - ODM + window MPO(right). pri_pipe:%d\n", 1537 __func__, 1538 pri_pipe->pipe_idx); 1539 return true; 1540 } 1541 } 1542 1543 *sec_pipe = *pri_pipe; 1544 1545 sec_pipe->pipe_idx = pipe_idx; 1546 sec_pipe->plane_res.mi = pool->mis[pipe_idx]; 1547 sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; 1548 sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; 1549 sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; 1550 sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; 1551 sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; 1552 sec_pipe->stream_res.dsc = NULL; 1553 if (odm) { 1554 if (pri_pipe->next_odm_pipe) { 1555 ASSERT(pri_pipe->next_odm_pipe != sec_pipe); 1556 sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe; 1557 sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe; 1558 } 1559 if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) { 1560 pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe; 1561 sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe; 1562 } 1563 if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) { 1564 pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe; 1565 sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe; 1566 } 1567 pri_pipe->next_odm_pipe = sec_pipe; 1568 sec_pipe->prev_odm_pipe = pri_pipe; 1569 ASSERT(sec_pipe->top_pipe == NULL); 1570 1571 if (!sec_pipe->top_pipe) 1572 sec_pipe->stream_res.opp = pool->opps[pipe_idx]; 1573 else 1574 sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; 1575 if (sec_pipe->stream->timing.flags.DSC == 1) { 1576 dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); 1577 ASSERT(sec_pipe->stream_res.dsc); 1578 if (sec_pipe->stream_res.dsc == NULL) 1579 return false; 1580 } 1581 } else { 1582 if (pri_pipe->bottom_pipe) { 1583 ASSERT(pri_pipe->bottom_pipe != sec_pipe); 1584 sec_pipe->bottom_pipe = pri_pipe->bottom_pipe; 1585 
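/* relink the inherited bottom pipe so it points back up at sec_pipe */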
sec_pipe->bottom_pipe->top_pipe = sec_pipe; 1586 } 1587 pri_pipe->bottom_pipe = sec_pipe; 1588 sec_pipe->top_pipe = pri_pipe; 1589 1590 ASSERT(pri_pipe->plane_state); 1591 } 1592 1593 return true; 1594 } 1595 1596 bool dcn32_internal_validate_bw(struct dc *dc, 1597 struct dc_state *context, 1598 display_e2e_pipe_params_st *pipes, 1599 int *pipe_cnt_out, 1600 int *vlevel_out, 1601 bool fast_validate) 1602 { 1603 bool out = false; 1604 bool repopulate_pipes = false; 1605 int split[MAX_PIPES] = { 0 }; 1606 bool merge[MAX_PIPES] = { false }; 1607 bool newly_split[MAX_PIPES] = { false }; 1608 int pipe_cnt, i, pipe_idx; 1609 int vlevel = context->bw_ctx.dml.soc.num_states; 1610 struct vba_vars_st *vba = &context->bw_ctx.dml.vba; 1611 1612 dc_assert_fp_enabled(); 1613 1614 ASSERT(pipes); 1615 if (!pipes) 1616 return false; 1617 1618 // For each full update, remove all existing phantom pipes first 1619 dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate); 1620 1621 dc->res_pool->funcs->update_soc_for_wm_a(dc, context); 1622 1623 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); 1624 1625 if (!pipe_cnt) { 1626 out = true; 1627 goto validate_out; 1628 } 1629 1630 dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); 1631 context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); 1632 1633 if (!fast_validate) 1634 dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); 1635 1636 if (fast_validate || 1637 (dc->debug.dml_disallow_alternate_prefetch_modes && 1638 (vlevel == context->bw_ctx.dml.soc.num_states || 1639 vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { 1640 /* 1641 * If dml_disallow_alternate_prefetch_modes is false, then we have already 1642 * tried alternate prefetch modes during full validation. 1643 * 1644 * If mode is unsupported or there is no p-state support, then 1645 * fall back to favouring voltage. 
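 * (i.e. accept the lowest voltage state that passes mode support, even if
 * it cannot guarantee a p-state switch.)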
1646 *
1647 * Since alternate prefetch modes have already been tried (or are disallowed),
1648 * disable prefetch-based support here (dm_prefetch_support_none) and let voltage drive the result.
1649 */
1650 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1651 dm_prefetch_support_none;
1652
1653 context->bw_ctx.dml.validate_max_state = fast_validate;
1654 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1655
1656 context->bw_ctx.dml.validate_max_state = false;
1657
1658 if (vlevel < context->bw_ctx.dml.soc.num_states) {
1659 memset(split, 0, sizeof(split));
1660 memset(merge, 0, sizeof(merge));
1661 vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
1662 // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
1663 vba->VoltageLevel = vlevel;
1664 }
1665 }
1666
1667 dml_log_mode_support_params(&context->bw_ctx.dml);
1668
1669 if (vlevel == context->bw_ctx.dml.soc.num_states)
1670 goto validate_fail;
1671
1672 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1673 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1674 struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
1675
1676 if (!pipe->stream)
1677 continue;
1678
1679 if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
1680 && !dc->config.enable_windowed_mpo_odm
1681 && pipe->plane_state && mpo_pipe
1682 && memcmp(&mpo_pipe->plane_res.scl_data.recout,
1683 &pipe->plane_res.scl_data.recout,
1684 sizeof(struct rect)) != 0) {
1685 ASSERT(mpo_pipe->plane_state != pipe->plane_state);
1686 goto validate_fail;
1687 }
1688 pipe_idx++;
1689 }
1690
1691 /* merge pipes if necessary */
1692 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1693 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1694
1695 /* skip pipes that don't need merging */
1696 if (!merge[i])
1697 continue;
1698
1699 /* if ODM merge we ignore the MPC tree; MPO pipes will have their own flags */
1700 if (pipe->prev_odm_pipe) {
1701 /* split off ODM pipe */
1702 pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
1703 if (pipe->next_odm_pipe)
1704 pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
1705
1706 /* 2:1 ODM + MPC split MPO to single pipe + MPC split MPO */
1707 if (pipe->bottom_pipe) {
1708 if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
1709 /* MPC split rules will handle this case */
1710 pipe->bottom_pipe->top_pipe = NULL;
1711 } else {
1712 /* when merging an ODM pipe, the bottom MPC pipe must now point to
1713 * the previous ODM pipe and its associated stream assets
1714 */
1715 if (pipe->prev_odm_pipe->bottom_pipe) {
1716 /* 3 plane MPO */
1717 pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
1718 pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
1719 } else {
1720 /* 2 plane MPO */
1721 pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
1722 pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
1723 }
1724
1725 memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
1726 }
1727 }
1728
1729 if (pipe->top_pipe) {
1730 pipe->top_pipe->bottom_pipe = NULL;
1731 }
1732
1733 pipe->bottom_pipe = NULL;
1734 pipe->next_odm_pipe = NULL;
1735 pipe->plane_state = NULL;
1736 pipe->stream = NULL;
1737 pipe->top_pipe = NULL;
1738 pipe->prev_odm_pipe = NULL;
1739 if (pipe->stream_res.dsc)
1740 dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
1741 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
1742
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); 1743 memset(&pipe->link_res, 0, sizeof(pipe->link_res)); 1744 repopulate_pipes = true; 1745 } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { 1746 struct pipe_ctx *top_pipe = pipe->top_pipe; 1747 struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; 1748 1749 top_pipe->bottom_pipe = bottom_pipe; 1750 if (bottom_pipe) 1751 bottom_pipe->top_pipe = top_pipe; 1752 1753 pipe->top_pipe = NULL; 1754 pipe->bottom_pipe = NULL; 1755 pipe->plane_state = NULL; 1756 pipe->stream = NULL; 1757 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); 1758 memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); 1759 memset(&pipe->link_res, 0, sizeof(pipe->link_res)); 1760 repopulate_pipes = true; 1761 } else 1762 ASSERT(0); /* Should never try to merge master pipe */ 1763 1764 } 1765 1766 for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { 1767 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1768 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1769 struct pipe_ctx *hsplit_pipe = NULL; 1770 bool odm; 1771 int old_index = -1; 1772 1773 if (!pipe->stream || newly_split[i]) 1774 continue; 1775 1776 pipe_idx++; 1777 odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; 1778 1779 if (!pipe->plane_state && !odm) 1780 continue; 1781 1782 if (split[i]) { 1783 if (odm) { 1784 if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) 1785 old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; 1786 else if (old_pipe->next_odm_pipe) 1787 old_index = old_pipe->next_odm_pipe->pipe_idx; 1788 } else { 1789 if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && 1790 old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) 1791 old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; 1792 else if (old_pipe->bottom_pipe && 1793 old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) 1794 old_index = old_pipe->bottom_pipe->pipe_idx; 1795 } 1796 hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index); 1797 ASSERT(hsplit_pipe); 1798 if (!hsplit_pipe) 1799 goto validate_fail; 1800 1801 if (!dcn32_split_stream_for_mpc_or_odm( 1802 dc, &context->res_ctx, 1803 pipe, hsplit_pipe, odm)) 1804 goto validate_fail; 1805 1806 newly_split[hsplit_pipe->pipe_idx] = true; 1807 repopulate_pipes = true; 1808 } 1809 if (split[i] == 4) { 1810 struct pipe_ctx *pipe_4to1; 1811 1812 if (odm && old_pipe->next_odm_pipe) 1813 old_index = old_pipe->next_odm_pipe->pipe_idx; 1814 else if (!odm && old_pipe->bottom_pipe && 1815 old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) 1816 old_index = old_pipe->bottom_pipe->pipe_idx; 1817 else 1818 old_index = -1; 1819 pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); 1820 ASSERT(pipe_4to1); 1821 if (!pipe_4to1) 1822 goto validate_fail; 1823 if (!dcn32_split_stream_for_mpc_or_odm( 1824 dc, &context->res_ctx, 1825 pipe, pipe_4to1, odm)) 1826 goto validate_fail; 1827 newly_split[pipe_4to1->pipe_idx] = true; 1828 1829 if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe 1830 && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) 1831 old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; 1832 else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && 1833 old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && 1834 old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == 
old_pipe->plane_state)
1835 old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
1836 else
1837 old_index = -1;
1838 pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
1839 ASSERT(pipe_4to1);
1840 if (!pipe_4to1)
1841 goto validate_fail;
1842 if (!dcn32_split_stream_for_mpc_or_odm(
1843 dc, &context->res_ctx,
1844 hsplit_pipe, pipe_4to1, odm))
1845 goto validate_fail;
1846 newly_split[pipe_4to1->pipe_idx] = true;
1847 }
1848 if (odm)
1849 dcn20_build_mapped_resource(dc, context, pipe->stream);
1850 }
1851
1852 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1853 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1854
1855 if (pipe->plane_state) {
1856 if (!resource_build_scaling_params(pipe))
1857 goto validate_fail;
1858 }
1859 }
1860
1861 /* Actual DSC count per stream DSC validation */
1862 if (!dcn20_validate_dsc(dc, context)) {
1863 vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
1864 goto validate_fail;
1865 }
1866
1867 if (repopulate_pipes) {
1868 int flag_max_mpc_comb = vba->maxMpcComb;
1869 int flag_vlevel = vlevel;
1870 int i;
1871
1872 pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
1873
1874 /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
1875 * we have to re-calculate the DET allocation and run through DML once more to
1876 * ensure all the params are calculated correctly. We do not need to run the
1877 * pipe split check again after this call (pipes are already split / merged).
1878 */
1879 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1880 dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
1881 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1882 if (vlevel == context->bw_ctx.dml.soc.num_states) {
1883 /* failed after DET size changes */
1884 goto validate_fail;
1885 } else if (flag_max_mpc_comb == 0 &&
1886 flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) {
1887 /* check that the context constructed with pipe split flags is still valid */
1888 bool flags_valid = false;
1889 for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
1890 if (vba->ModeSupport[i][flag_max_mpc_comb]) {
1891 vba->maxMpcComb = flag_max_mpc_comb;
1892 vba->VoltageLevel = i;
1893 vlevel = i;
1894 flags_valid = true;
1895 }
1896 }
1897
1898 /* this should never happen */
1899 if (!flags_valid)
1900 goto validate_fail;
1901 }
1902 }
1903 *vlevel_out = vlevel;
1904 *pipe_cnt_out = pipe_cnt;
1905
1906 out = true;
1907 goto validate_out;
1908
1909 validate_fail:
1910 out = false;
1911
1912 validate_out:
1913 return out;
1914 }
1915
1916
1917 void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
1918 display_e2e_pipe_params_st *pipes,
1919 int pipe_cnt,
1920 int vlevel)
1921 {
1922 int i, pipe_idx, vlevel_temp = 0;
1923 double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
1924 double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
1925 double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
1926 bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
1927 dm_dram_clock_change_unsupported;
1928 unsigned int dummy_latency_index = 0;
1929 int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
1930 unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
1931 bool subvp_in_use = dcn32_subvp_in_use(dc, context);
1932 unsigned int min_dram_speed_mts_margin;
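/* need_fclk_lat_as_dummy (declared below) records that fclk_change_latency_us
 * was temporarily raised to the dummy p-state latency for validation; the
 * original value is restored at the end of this function.
 */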
1933 bool need_fclk_lat_as_dummy = false;
1934 bool is_subvp_p_drr = false;
1935 struct dc_stream_state *fpo_candidate_stream = NULL;
1936
1937 dc_assert_fp_enabled();
1938
1939 /* need to find dummy latency index for subvp */
1940 if (subvp_in_use) {
1941 /* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching its VBLANK */
1942 if (!pstate_en) {
1943 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
1944 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
1945 pstate_en = true;
1946 is_subvp_p_drr = true;
1947 }
1948 dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
1949 context, pipes, pipe_cnt, vlevel);
1950
1951 /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is
1952 * scheduled correctly to account for dummy pstate.
1953 */
1954 if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
1955 need_fclk_lat_as_dummy = true;
1956 context->bw_ctx.dml.soc.fclk_change_latency_us =
1957 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
1958 }
1959 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
1960 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
1961 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
1962 maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
1963 if (is_subvp_p_drr) {
1964 context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
1965 }
1966 }
1967
1968 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
1969 for (i = 0; i < context->stream_count; i++) {
1970 if (context->streams[i])
1971 context->streams[i]->fpo_in_use = false;
1972 }
1973
1974 if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
1975 pstate_en && vlevel != 0)) {
1976 /* the fw based vblank stretch is only attempted when the mclk switch cannot be natural */
1977 fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
1978 if (fpo_candidate_stream) {
1979 fpo_candidate_stream->fpo_in_use = true;
1980 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
1981 }
1982
1983 if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
1984 dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
1985 context, pipes, pipe_cnt, vlevel);
1986
1987 /* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
1988 * we reinstate the original dram_clock_change_latency_us on the context
1989 * and all variables that may have changed up to this point, except the
1990 * newly found dummy_latency_index
1991 */
1992 context->bw_ctx.dml.soc.dram_clock_change_latency_us =
1993 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
1994 /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
1995 * prefetch is scheduled correctly to account for dummy pstate.
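 * (The dummy p-state latency itself comes from dummy_pstate_table, indexed
 * by the dummy_latency_index found above.)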
1996 */ 1997 if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) { 1998 need_fclk_lat_as_dummy = true; 1999 context->bw_ctx.dml.soc.fclk_change_latency_us = 2000 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; 2001 } 2002 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false); 2003 if (vlevel_temp < vlevel) { 2004 vlevel = vlevel_temp; 2005 maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; 2006 dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; 2007 pstate_en = true; 2008 } else { 2009 /* Restore FCLK latency and re-run validation to go back to original validation 2010 * output if we find that enabling FPO does not give us any benefit (i.e. lower 2011 * voltage level) 2012 */ 2013 context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; 2014 for (i = 0; i < context->stream_count; i++) { 2015 if (context->streams[i]) 2016 context->streams[i]->fpo_in_use = false; 2017 } 2018 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; 2019 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); 2020 } 2021 } 2022 } 2023 2024 /* Set B: 2025 * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, 2026 * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark 2027 * calculations to cover bootup clocks. 2028 * DCFCLK: soc.clock_limits[2] when available 2029 * UCLK: soc.clock_limits[2] when available 2030 */ 2031 if (dcn3_2_soc.num_states > 2) { 2032 vlevel_temp = 2; 2033 dcfclk = dcn3_2_soc.clock_limits[2].dcfclk_mhz; 2034 } else 2035 dcfclk = 615; //DCFCLK Vmin_lv 2036 2037 pipes[0].clks_cfg.voltage = vlevel_temp; 2038 pipes[0].clks_cfg.dcfclk_mhz = dcfclk; 2039 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; 2040 2041 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { 2042 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; 2043 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us; 2044 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; 2045 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; 2046 } 2047 context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2048 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2049 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2050 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2051 context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2052 context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2053 
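/* (The latency-style DML getters return microseconds, stored here as ns via
 * the * 1000; the frac_urg_bw values are fractions scaled by 1000.)
 */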
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2054 context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2055 context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2056 context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2057 2058 /* Set D: 2059 * All clocks min. 2060 * DCFCLK: Min, as reported by PM FW when available 2061 * UCLK : Min, as reported by PM FW when available 2062 * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) 2063 */ 2064 2065 if (dcn3_2_soc.num_states > 2) { 2066 vlevel_temp = 0; 2067 dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; 2068 } else 2069 dcfclk = 615; //DCFCLK Vmin_lv 2070 2071 pipes[0].clks_cfg.voltage = vlevel_temp; 2072 pipes[0].clks_cfg.dcfclk_mhz = dcfclk; 2073 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; 2074 2075 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { 2076 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; 2077 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us; 2078 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; 2079 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; 2080 } 2081 context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2082 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2083 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2084 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2085 context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2086 context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2087 context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2088 context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2089 context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2090 context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2091 2092 /* Set C, for Dummy P-State: 2093 * All clocks min. 
2094 * DCFCLK: Min, as reported by PM FW, when available 2095 * UCLK : Min, as reported by PM FW, when available 2096 * pstate latency as per UCLK state dummy pstate latency 2097 */ 2098 2099 // For Set A and Set C use values from validation 2100 pipes[0].clks_cfg.voltage = vlevel; 2101 pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; 2102 pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; 2103 2104 if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { 2105 pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching; 2106 } 2107 2108 if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { 2109 min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; 2110 min_dram_speed_mts_margin = 160; 2111 2112 context->bw_ctx.dml.soc.dram_clock_change_latency_us = 2113 dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us; 2114 2115 if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] == 2116 dm_dram_clock_change_unsupported) { 2117 int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1; 2118 2119 min_dram_speed_mts = 2120 dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16; 2121 } 2122 2123 if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) { 2124 /* find largest table entry that is lower than dram speed, 2125 * but lower than DPM0 still uses DPM0 2126 */ 2127 for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--) 2128 if (min_dram_speed_mts + min_dram_speed_mts_margin > 2129 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts) 2130 break; 2131 } 2132 2133 context->bw_ctx.dml.soc.dram_clock_change_latency_us = 2134 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; 2135 2136 context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us; 2137 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; 2138 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; 2139 } 2140 2141 context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2142 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2143 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2144 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2145 context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2146 context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2147 context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2148 context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2149 /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state. 
2150 * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM 2151 * value. 2152 */ 2153 context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2154 context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2155 2156 if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) { 2157 /* The only difference between A and C is p-state latency, if p-state is not supported 2158 * with full p-state latency we want to calculate DLG based on dummy p-state latency, 2159 * Set A p-state watermark set to 0 on DCN30, when p-state unsupported, for now keep as DCN30. 2160 */ 2161 context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; 2162 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; 2163 /* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case 2164 * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported 2165 */ 2166 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2167 } else { 2168 /* Set A: 2169 * All clocks min. 2170 * DCFCLK: Min, as reported by PM FW, when available 2171 * UCLK: Min, as reported by PM FW, when available 2172 */ 2173 2174 /* For set A set the correct latency values (i.e. non-dummy values) unconditionally 2175 */ 2176 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; 2177 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; 2178 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; 2179 2180 context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2181 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2182 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2183 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2184 context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2185 context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2186 context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2187 context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2188 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2189 context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 2190 } 2191 2192 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 2193 if (!context->res_ctx.pipe_ctx[i].stream) 2194 continue; 2195 2196 pipes[pipe_idx].clks_cfg.dispclk_mhz = 
get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); 2197 pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 2198 2199 if (dc->config.forced_clocks) { 2200 pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; 2201 pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; 2202 } 2203 if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) 2204 pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; 2205 if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) 2206 pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; 2207 2208 pipe_idx++; 2209 } 2210 2211 context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; 2212 2213 /* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */ 2214 if (need_fclk_lat_as_dummy) 2215 context->bw_ctx.dml.soc.fclk_change_latency_us = 2216 dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; 2217 2218 dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); 2219 2220 if (!pstate_en) 2221 /* Restore full p-state latency */ 2222 context->bw_ctx.dml.soc.dram_clock_change_latency_us = 2223 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; 2224 2225 /* revert fclk lat changes if required */ 2226 if (need_fclk_lat_as_dummy) 2227 context->bw_ctx.dml.soc.fclk_change_latency_us = 2228 dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; 2229 } 2230 2231 static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, 2232 unsigned int *optimal_dcfclk, 2233 unsigned int *optimal_fclk) 2234 { 2235 double bw_from_dram, bw_from_dram1, bw_from_dram2; 2236 2237 bw_from_dram1 = uclk_mts * dcn3_2_soc.num_chans * 2238 dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_dram_bw_use_normal_percent / 100); 2239 bw_from_dram2 = uclk_mts * dcn3_2_soc.num_chans * 2240 dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100); 2241 2242 bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? 
bw_from_dram1 : bw_from_dram2;
2243
2244 if (optimal_fclk)
2245 *optimal_fclk = bw_from_dram /
2246 (dcn3_2_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2247
2248 if (optimal_dcfclk)
2249 *optimal_dcfclk = bw_from_dram /
2250 (dcn3_2_soc.return_bus_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2251 }
2252
2253 static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
2254 unsigned int index)
2255 {
2256 int i;
2257
2258 if (*num_entries == 0)
2259 return;
2260
2261 for (i = index; i < *num_entries - 1; i++) {
2262 table[i] = table[i + 1];
2263 }
2264 memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
2265 }
2266
2267 void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
2268 {
2269 int i;
2270 unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
2271 max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
2272
2273 for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
2274 if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
2275 max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
2276 if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
2277 max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2278 if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
2279 max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
2280 if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
2281 max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
2282 if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
2283 max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
2284 if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
2285 max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
2286 if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
2287 max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
2288 }
2289
2290 /* Scan through the clock values we currently have and, if they are 0,
2291 * populate them with the dcn3_2_soc.clock_limits[] values.
2292 *
2293 * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK as any of those being
2294 * 0 will cause it to skip building the clock table.
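 * (Presumably PMFW can report 0 for levels it does not own on some SKUs;
 * seeding entry 0 from the spreadsheet defaults keeps the clock-table
 * builder from bailing out.)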
2295 */ 2296 if (max_dcfclk_mhz == 0) 2297 bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz; 2298 if (max_dispclk_mhz == 0) 2299 bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz; 2300 if (max_dtbclk_mhz == 0) 2301 bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz; 2302 if (max_uclk_mhz == 0) 2303 bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16; 2304 } 2305 2306 static int build_synthetic_soc_states(struct clk_bw_params *bw_params, 2307 struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries) 2308 { 2309 int i, j; 2310 struct _vcs_dpi_voltage_scaling_st entry = {0}; 2311 2312 unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, 2313 max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0; 2314 2315 unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299; 2316 2317 static const unsigned int num_dcfclk_stas = 5; 2318 unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564}; 2319 2320 unsigned int num_uclk_dpms = 0; 2321 unsigned int num_fclk_dpms = 0; 2322 unsigned int num_dcfclk_dpms = 0; 2323 2324 for (i = 0; i < MAX_NUM_DPM_LVL; i++) { 2325 if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) 2326 max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; 2327 if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz) 2328 max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz; 2329 if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz) 2330 max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz; 2331 if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) 2332 max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; 2333 if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) 2334 max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; 2335 if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) 2336 max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; 2337 if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz) 2338 max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; 2339 2340 if (bw_params->clk_table.entries[i].memclk_mhz > 0) 2341 num_uclk_dpms++; 2342 if (bw_params->clk_table.entries[i].fclk_mhz > 0) 2343 num_fclk_dpms++; 2344 if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) 2345 num_dcfclk_dpms++; 2346 } 2347 2348 if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz) 2349 min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz; 2350 2351 if (!max_dcfclk_mhz || !max_dispclk_mhz || !max_dtbclk_mhz) 2352 return -1; 2353 2354 if (max_dppclk_mhz == 0) 2355 max_dppclk_mhz = max_dispclk_mhz; 2356 2357 if (max_fclk_mhz == 0) 2358 max_fclk_mhz = max_dcfclk_mhz * dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / dcn3_2_soc.pct_ideal_fabric_bw_after_urgent; 2359 2360 if (max_phyclk_mhz == 0) 2361 max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz; 2362 2363 *num_entries = 0; 2364 entry.dispclk_mhz = max_dispclk_mhz; 2365 entry.dscclk_mhz = max_dispclk_mhz / 3; 2366 entry.dppclk_mhz = max_dppclk_mhz; 2367 entry.dtbclk_mhz = max_dtbclk_mhz; 2368 entry.phyclk_mhz = max_phyclk_mhz; 2369 entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz; 2370 entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz; 2371 2372 // Insert all the DCFCLK STAs 2373 for (i = 0; i < num_dcfclk_stas; i++) { 2374 entry.dcfclk_mhz = dcfclk_sta_targets[i]; 2375 
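/* fabricclk/dram_speed are left at 0 for DCFCLK STA points; they are
 * rounded up to real DPM levels by the passes further down. */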
entry.fabricclk_mhz = 0;
2376 entry.dram_speed_mts = 0;
2377
2378 insert_entry_into_table_sorted(table, num_entries, &entry);
2379 }
2380
2381 // Insert the max DCFCLK
2382 entry.dcfclk_mhz = max_dcfclk_mhz;
2383 entry.fabricclk_mhz = 0;
2384 entry.dram_speed_mts = 0;
2385
2386 insert_entry_into_table_sorted(table, num_entries, &entry);
2387
2388 // Insert the UCLK DPMS
2389 for (i = 0; i < num_uclk_dpms; i++) {
2390 entry.dcfclk_mhz = 0;
2391 entry.fabricclk_mhz = 0;
2392 entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
2393
2394 insert_entry_into_table_sorted(table, num_entries, &entry);
2395 }
2396
2397 // If FCLK is coarse grained, insert individual DPMs.
2398 if (num_fclk_dpms > 2) {
2399 for (i = 0; i < num_fclk_dpms; i++) {
2400 entry.dcfclk_mhz = 0;
2401 entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2402 entry.dram_speed_mts = 0;
2403
2404 insert_entry_into_table_sorted(table, num_entries, &entry);
2405 }
2406 }
2407 // If FCLK fine grained, only insert max
2408 else {
2409 entry.dcfclk_mhz = 0;
2410 entry.fabricclk_mhz = max_fclk_mhz;
2411 entry.dram_speed_mts = 0;
2412
2413 insert_entry_into_table_sorted(table, num_entries, &entry);
2414 }
2415
2416 // At this point, the table contains all "points of interest" based on
2417 // DPMs from PMFW, and STAs. Table is sorted by BW, and all clock
2418 // ratios (by derate) are exact.
2419
2420 // Remove states that require higher clocks than are supported
2421 for (i = *num_entries - 1; i >= 0 ; i--) {
2422 if (table[i].dcfclk_mhz > max_dcfclk_mhz ||
2423 table[i].fabricclk_mhz > max_fclk_mhz ||
2424 table[i].dram_speed_mts > max_uclk_mhz * 16)
2425 remove_entry_from_table_at_index(table, num_entries, i);
2426 }
2427
2428 // At this point, the table only contains supported points of interest.
2429 // It could be used as is, but some states may be redundant due to the
2430 // coarse grained nature of some clocks, so we want to round up to
2431 // coarse grained DPMs and remove duplicates.
2432
2433 // Round up UCLKs
2434 for (i = *num_entries - 1; i >= 0 ; i--) {
2435 for (j = 0; j < num_uclk_dpms; j++) {
2436 if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
2437 table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
2438 break;
2439 }
2440 }
2441 }
2442
2443 // If FCLK is coarse grained, round up to next DPMs
2444 if (num_fclk_dpms > 2) {
2445 for (i = *num_entries - 1; i >= 0 ; i--) {
2446 for (j = 0; j < num_fclk_dpms; j++) {
2447 if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
2448 table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
2449 break;
2450 }
2451 }
2452 }
2453 }
2454 // Otherwise, round up to minimum.
2455 else {
2456 for (i = *num_entries - 1; i >= 0 ; i--) {
2457 if (table[i].fabricclk_mhz < min_fclk_mhz) {
2458 table[i].fabricclk_mhz = min_fclk_mhz;
2459 }
2460 }
2461 }
2462
2463 // Round DCFCLKs up to minimum
2464 for (i = *num_entries - 1; i >= 0 ; i--) {
2465 if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
2466 table[i].dcfclk_mhz = min_dcfclk_mhz;
2467 }
2468 }
2469
2470 // Remove duplicate states; note that duplicates are always neighbouring since the table is sorted.
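// e.g. two UCLK DPMs that both rounded up to the same
// {dcfclk, fabricclk, dram_speed_mts} triple collapse into a single state here.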
2471 i = 0;
2472 while (i < *num_entries - 1) {
2473 if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
2474 table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
2475 table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
2476 remove_entry_from_table_at_index(table, num_entries, i + 1);
2477 else
2478 i++;
2479 }
2480
2481 // Fix up the state indices
2482 for (i = *num_entries - 1; i >= 0 ; i--) {
2483 table[i].state = i;
2484 }
2485
2486 return 0;
2487 }
2488
2489 /*
2490 * dcn32_update_bw_bounding_box
2491 *
2492 * This overrides some dcn3_2 ip_or_soc initial parameters, hardcoded from the
2493 * spreadsheet, with actual values as per dGPU SKU:
2494 * - with a few options passed from dc->config
2495 * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
2496 * need to get it from PM FW)
2497 * - with latency values (passed in ns units) from dc->bb_overrides, for
2498 * debugging purposes
2499 * - with latencies from VBIOS (in 100ns units) if available for a certain
2500 * dGPU SKU
2501 * - with the number of DRAM channels from VBIOS (which differs for certain
2502 * dGPU SKUs of the same ASIC)
2503 * - with clock levels from the clk_table entries passed by Clk Mgr, as
2504 * reported by PM FW for the different clocks (which might differ for
2505 * certain dGPU SKUs of the same ASIC)
2506 */
2507 void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
2508 {
2509 dc_assert_fp_enabled();
2510
2511 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2512 /* Overrides from dc->config options */
2513 dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
2514
2515 /* Override from passed dc->bb_overrides if available */
2516 if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
2517 && dc->bb_overrides.sr_exit_time_ns) {
2518 dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
2519 }
2520
2521 if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
2522 != dc->bb_overrides.sr_enter_plus_exit_time_ns
2523 && dc->bb_overrides.sr_enter_plus_exit_time_ns) {
2524 dcn3_2_soc.sr_enter_plus_exit_time_us =
2525 dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
2526 }
2527
2528 if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
2529 && dc->bb_overrides.urgent_latency_ns) {
2530 dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
2531 dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
2532 }
2533
2534 if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
2535 != dc->bb_overrides.dram_clock_change_latency_ns
2536 && dc->bb_overrides.dram_clock_change_latency_ns) {
2537 dcn3_2_soc.dram_clock_change_latency_us =
2538 dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
2539 }
2540
2541 if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
2542 != dc->bb_overrides.fclk_clock_change_latency_ns
2543 && dc->bb_overrides.fclk_clock_change_latency_ns) {
2544 dcn3_2_soc.fclk_change_latency_us =
2545 dc->bb_overrides.fclk_clock_change_latency_ns / 1000;
2546 }
2547
2548 if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
2549 != dc->bb_overrides.dummy_clock_change_latency_ns
2550 && dc->bb_overrides.dummy_clock_change_latency_ns) {
2551 dcn3_2_soc.dummy_pstate_latency_us =
2552 dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
2553 }
2554
2555 /* Override from VBIOS if VBIOS bb_info available */
2556 if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
2557 struct bp_soc_bb_info bb_info = {0};
2558
2559 if
(dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
2560 if (bb_info.dram_clock_change_latency_100ns > 0)
2561 dcn3_2_soc.dram_clock_change_latency_us =
2562 bb_info.dram_clock_change_latency_100ns * 10;
2563
2564 if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
2565 dcn3_2_soc.sr_enter_plus_exit_time_us =
2566 bb_info.dram_sr_enter_exit_latency_100ns * 10;
2567
2568 if (bb_info.dram_sr_exit_latency_100ns > 0)
2569 dcn3_2_soc.sr_exit_time_us =
2570 bb_info.dram_sr_exit_latency_100ns * 10;
2571 }
2572 }
2573
2574 /* Override from VBIOS for num_chan */
2575 if (dc->ctx->dc_bios->vram_info.num_chans) {
2576 dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
2577 dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
2578 dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
2579 }
2580
2581 if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
2582 dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
2583 }
2584
2585 /* DML DSC delay factor workaround */
2586 dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
2587
2588 dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
2589
2590 /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
2591 dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
2592 dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
2593
2594 /* Override clock levels from Clk Mgr table entries as reported by PM FW */
2595 if ((!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) && (bw_params->clk_table.entries[0].memclk_mhz)) {
2596 if (dc->debug.use_legacy_soc_bb_mechanism) {
2597 unsigned int i = 0, j = 0, num_states = 0;
2598
2599 unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
2600 unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
2601 unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
2602 unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
2603 unsigned int min_dcfclk = UINT_MAX;
2604 /* Set 199 as first value in STA target array to have a minimum DCFCLK value.
			/* Set 199 as the first value in the STA target array to have a minimum
			 * DCFCLK value. For DCN32 we set the minimum to 199 so that FCLK DPM0
			 * (300 MHz) can be achieved.
			 */
			unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
			unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
			unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;

			for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
				if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
					max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
				if (bw_params->clk_table.entries[i].dcfclk_mhz != 0 &&
						bw_params->clk_table.entries[i].dcfclk_mhz < min_dcfclk)
					min_dcfclk = bw_params->clk_table.entries[i].dcfclk_mhz;
				if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
					max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
				if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
					max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
				if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
					max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
			}
			if (min_dcfclk > dcfclk_sta_targets[0])
				dcfclk_sta_targets[0] = min_dcfclk;
			if (!max_dcfclk_mhz)
				max_dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
			if (!max_dispclk_mhz)
				max_dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
			if (!max_dppclk_mhz)
				max_dppclk_mhz = dcn3_2_soc.clock_limits[0].dppclk_mhz;
			if (!max_phyclk_mhz)
				max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;

			if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets - 1]) {
				// If max DCFCLK is greater than the max DCFCLK STA target, insert it into the DCFCLK STA target array
				dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
				num_dcfclk_sta_targets++;
			} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets - 1]) {
				// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
				for (i = 0; i < num_dcfclk_sta_targets; i++) {
					if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
						dcfclk_sta_targets[i] = max_dcfclk_mhz;
						break;
					}
				}
				// Update the size of the array since we "removed" duplicates
				num_dcfclk_sta_targets = i + 1;
			}

			num_uclk_states = bw_params->clk_table.num_entries;

			// Calculate the optimal DCFCLK for each UCLK
			for (i = 0; i < num_uclk_states; i++) {
				dcn32_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
						&optimal_dcfclk_for_uclk[i], NULL);
				if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
					optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
				}
			}

			// Calculate the optimal UCLK for each DCFCLK STA target
			for (i = 0; i < num_dcfclk_sta_targets; i++) {
				for (j = 0; j < num_uclk_states; j++) {
					if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
						optimal_uclk_for_dcfclk_sta_targets[i] =
							bw_params->clk_table.entries[j].memclk_mhz * 16;
						break;
					}
				}
			}

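			/*
			 * The merge below emits states in ascending DCFCLK
			 * order, taking either the next STA target or the next
			 * per-UCLK optimal DCFCLK, pairing each STA target with
			 * the lowest UCLK whose optimal DCFCLK exceeds it (as
			 * computed above); the two trailing loops append any
			 * leftover entries from either list. The memclk_mhz * 16
			 * factor here and above is assumed to be the memclk ->
			 * MT/s data-rate conversion for the GDDR6 DRAM on these
			 * dGPU SKUs.
			 */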
			i = 0;
			j = 0;
			// Create the final DCFCLK and UCLK table
			while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
					dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
					dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
				} else {
					if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
						dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
						dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
					} else {
						j = num_uclk_states;
					}
				}
			}

			while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
			}

			while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
					optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
				dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
				dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
			}

			dcn3_2_soc.num_states = num_states;
			for (i = 0; i < dcn3_2_soc.num_states; i++) {
				dcn3_2_soc.clock_limits[i].state = i;
				dcn3_2_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
				dcn3_2_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];

				/* Fill all states with the max values of these clocks */
				dcn3_2_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
				dcn3_2_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz;
				dcn3_2_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz;
				dcn3_2_soc.clock_limits[i].dscclk_mhz = max_dispclk_mhz / 3;

				/* Populate DTBCLK and SOCCLK from bw_params */
				if (i > 0) {
					if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
						dcn3_2_soc.clock_limits[i].dtbclk_mhz = dcn3_2_soc.clock_limits[i-1].dtbclk_mhz;
					} else {
						dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
					}
				} else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
					dcn3_2_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
				}

				if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
					dcn3_2_soc.clock_limits[i].socclk_mhz = dcn3_2_soc.clock_limits[i-1].socclk_mhz;
				else
					dcn3_2_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;

				if (!dram_speed_mts[i] && i > 0)
					dcn3_2_soc.clock_limits[i].dram_speed_mts = dcn3_2_soc.clock_limits[i-1].dram_speed_mts;
				else
					dcn3_2_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];

				/* These clocks cannot come from bw_params; always fill them from dcn3_2_soc[0] */
				/* PHYCLK_D18, PHYCLK_D32 */
				dcn3_2_soc.clock_limits[i].phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
				dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
			}
		} else {
			build_synthetic_soc_states(bw_params, dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
		}

		/* Re-init DML with the updated bounding box */
		dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
		if (dc->current_state)
			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
	}
}

void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
				  int pipe_cnt)
{
	dc_assert_fp_enabled();

	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
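/*
 * The refresh-rate computation in the function below is an integer ceiling of
 * pixel_clock_hz / (h_total * v_total). Illustrative numbers (hypothetical
 * timing, not from this file): pix_clk_100hz = 2415000 (i.e. 241.5 MHz) with
 * h_total * v_total = 4028320 pixels per frame gives
 * (241500000 + 4028320 - 1) / 4028320 -> 60 Hz after truncation.
 */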
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
{
	bool allow = false;
	uint32_t refresh_rate = 0;

	/* For now, allow SubVP on displays that have active margin only for
	 * 2560x1440@60hz displays with no scaling, to enable 4K60 + 1440p60
	 * configs for p-state switching.
	 */
	if (pipe->stream && pipe->plane_state) {
		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
				pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
				/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
		if (pipe->stream->timing.v_addressable == 1440 &&
				pipe->stream->timing.h_addressable == 2560 &&
				refresh_rate >= 55 && refresh_rate <= 65 &&
				pipe->plane_state->src_rect.height == 1440 &&
				pipe->plane_state->src_rect.width == 2560 &&
				pipe->plane_state->dst_rect.height == 1440 &&
				pipe->plane_state->dst_rect.width == 2560)
			allow = true;
	}
	return allow;
}

/**
 * dcn32_determine_max_vratio_prefetch - Determine the max vratio for prefetch by driver policy
 *
 * @dc: Current DC state
 * @context: New DC state to be programmed
 *
 * Return: Max vratio for prefetch
 */
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
{
	double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
	int i;

	/* For single-display MPO configs, allow the max vratio to be 8
	 * if any plane is in a YUV420 format.
	 */
	if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
		for (i = 0; i < context->stream_status[0].plane_count; i++) {
			if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
					context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
				max_vratio_pre = __DML_MAX_VRATIO_PRE__;
			}
		}
	}
	return max_vratio_pre;
}
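/*
 * Note (assumption, for context): the vratio here is DML's vertical scaling
 * ratio (source viewport height over scaled output height); raising the
 * prefetch cap from __DML_MAX_BW_RATIO_PRE__ (4) to __DML_MAX_VRATIO_PRE__
 * (8, per the comment above) lets heavily downscaled YUV420 MPO planes still
 * pass prefetch validation.
 */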
/**
 * dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for the FPO + VActive case
 *
 * This function chooses the FPO candidate stream for FPO + VActive cases (2-stream config).
 * For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0 and the
 * other display has ActiveMargin <= 0. This function chooses the pipe/stream that has
 * ActiveMargin <= 0 as the FPO stream candidate, if one is found.
 *
 * @dc: current dc state
 * @context: new dc state
 * @fpo_candidate_stream: pointer to the FPO stream candidate if one is found
 *
 * Return: void
 */
void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream)
{
	unsigned int i, pipe_idx;
	const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
			*fpo_candidate_stream = pipe->stream;
			break;
		}
		pipe_idx++;
	}
}

/**
 * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
 *
 * @dc: current dc state
 * @context: new dc state
 * @vactive_margin_req_us: the VActive margin required for a VActive pipe to be considered "found"
 *
 * Return: True if a VACTIVE display is found, false otherwise
 */
bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req_us)
{
	unsigned int i, pipe_idx;
	const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
	bool vactive_found = false;

	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->stream)
			continue;

		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] >= vactive_margin_req_us) {
			vactive_found = true;
			break;
		}
		pipe_idx++;
	}
	return vactive_found;
}

void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
{
	dc_assert_fp_enabled();
	dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
}
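/*
 * Illustrative usage (hypothetical caller, not part of this file): every
 * *_fpu entry point here asserts via dc_assert_fp_enabled(), so a caller is
 * expected to wrap the call in the DC kernel-FPU guards, e.g.:
 *
 *	DC_FP_START();
 *	dcn32_set_clock_limits(&dcn3_2_soc);
 *	DC_FP_END();
 */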