1 /* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "vg_clk_mgr.h"
#include "dcn301_smu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"

#include "atomfirmware.h"
#include "vangogh_ip_offset.h"
#include "clk/clk_11_5_0_offset.h"
#include "clk/clk_11_5_0_sh_mask.h"

/* Constants */

#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */

/* Macros */

#define TO_CLK_MGR_VGH(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_vgh, base)

#define REG(reg_name) \
	(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)

/*
 * Count displays that are actively driven, with a workaround for TMDS
 * (HDMI/DVI) sinks: report at least one active display whenever a TMDS
 * stream exists, even if no DIG is enabled, so low-power entry is skipped.
 *
 * TODO: evaluate how to lower or disable all dcn clocks in screen off case
 */
static int vg_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;
	bool tmds_present = false;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after display off, then back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}

/*
 * Program clocks for the new dc_state via SMU messages.
 *
 * Transitions between MISSION_MODE and LOW_POWER power states based on the
 * active display count, then updates DCFCLK, deep-sleep DCFCLK, DPPCLK and
 * DISPCLK (all in kHz) when should_set_clock() says the cached value must
 * change.  The order of DPP DTO vs. global DPPCLK programming depends on
 * whether DPPCLK is being raised or lowered (see comments below).
 */
static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * if it is safe to lower, but we are already in the lower state, we don't have to do anything
	 * also if safe to lower is false, we just go in the higher state
	 */
	if (safe_to_lower) {
		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {

			display_count = vg_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0 && !IS_DIAG_DC(dc->ctx->dce_environment)) {
				union display_idle_optimization_u idle_info = { 0 };

				idle_info.idle_info.df_request_disabled = 1;
				idle_info.idle_info.phy_ref_clk_off = 1;

				dcn301_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			/* zeroed idle_info re-enables DF requests and the PHY refclk */
			union display_idle_optimization_u idle_info = { 0 };

			dcn301_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && !dc->debug.disable_min_fclk) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn301_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && !dc->debug.disable_min_fclk) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn301_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		if (new_clocks->dppclk_khz < 100000)
			new_clocks->dppclk_khz = 100000;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn301_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);

		update_dispclk = true;
	}

	if (dpp_clock_lowered) {
		// increase per DPP DTO before lowering global dppclk
		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
		dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per DPP DTO
		if (update_dppclk || update_dispclk)
			dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
		// always update dtos unless clock is lowered and not safe to lower
		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
	}
}

/*
 * Read the PLL feedback multiplier from CLK1_CLK_PLL_REQ and return the
 * VCO frequency in kHz (FbMult * dfs_ref_freq_khz), computed with the
 * driver's 31.32 fixed-point helpers.
 */
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* get FbMult value */
	struct fixed31_32 pll_req;
	unsigned int fbmult_frac_val = 0;
	unsigned int fbmult_int_val = 0;


	/*
	 * Register value of fbmult is in 8.16 format, we are converting to 31.32
	 * to leverage the fix point operations available in driver
	 */

	REG_GET(CLK1_0_CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
	REG_GET(CLK1_0_CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */

	pll_req = dc_fixpt_from_int(fbmult_int_val);

	/*
	 * since fractional part is only 16 bit in register definition but is 32 bit
	 * in our fix point definition, need to shift left by 16 to obtain correct value
	 */
	pll_req.value |= fbmult_frac_val << 16;

	/* multiply by REFCLK period */
	pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);

	/* integer part is now VCO frequency in kHz */
	return dc_fixpt_floor(pll_req);
}

/* Snapshot the raw CLK1 current-count / bypass / deep-sleep registers. */
static void vg_dump_clk_registers_internal(struct dcn301_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	/* CLK3 = dcfclk, CLK2 = dprefclk, CLK1 = dppclk, CLK0 = dispclk */
	internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK3_CURRENT_CNT);
	internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK3_BYPASS_CNTL);

	internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_0_CLK1_CLK3_DS_CNTL);	//dcf deep sleep divider
	internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_0_CLK1_CLK3_ALLOW_DS);

	internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK1_CURRENT_CNT);
	internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK1_BYPASS_CNTL);

	internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK2_CURRENT_CNT);
	internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK2_BYPASS_CNTL);

	internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_0_CLK1_CLK0_CURRENT_CNT);
	internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_0_CLK1_CLK0_BYPASS_CNTL);
}

/*
 * This function collect raw clk register values, decodes them into
 * regs_and_bypass (counts are divided by 10 to get MHz; bypass fields are
 * the low 3 bits of the bypass control, clamped to the bypass_clks table),
 * and optionally appends a CSV-style dump to log_info's buffer.
 */
static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
	struct dcn301_clk_internal internal = {0};
	char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
	unsigned int chars_printed = 0;
	unsigned int remaining_buffer = log_info->bufSize;

	vg_dump_clk_registers_internal(&internal, clk_mgr_base);

	regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
	regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
	regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
	regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
	regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
	regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;

	/*
	 * NOTE(review): if these *_bypass fields are unsigned, the "< 0" half of
	 * each range check is always false; only the "> 4" clamp (keeping the
	 * value indexable into bypass_clks[5]) has effect — confirm field types.
	 */
	regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
	if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
		regs_and_bypass->dppclk_bypass = 0;
	regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
	if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
		regs_and_bypass->dcfclk_bypass = 0;
	regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
	if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
		regs_and_bypass->dispclk_bypass = 0;
	regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
	if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
		regs_and_bypass->dprefclk_bypass = 0;

	if (log_info->enabled) {
		/* decoded values, one clock per row */
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dcfclk,%d,%d,%d,%s\n",
			regs_and_bypass->dcfclk,
			regs_and_bypass->dcf_deep_sleep_divider,
			regs_and_bypass->dcf_deep_sleep_allow,
			bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dprefclk,%d,N/A,N/A,%s\n",
			regs_and_bypass->dprefclk,
			bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "dispclk,%d,N/A,N/A,%s\n",
			regs_and_bypass->dispclk,
			bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		//split
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "SPLIT\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		// REGISTER VALUES
		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "reg_name,value,clk_type\n");
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
				internal.CLK1_CLK3_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider\n",
				internal.CLK1_CLK3_DS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow\n",
				internal.CLK1_CLK3_ALLOW_DS);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_CURRENT_CNT,%d,dprefclk\n",
				internal.CLK1_CLK2_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_CURRENT_CNT,%d,dispclk\n",
				internal.CLK1_CLK0_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_CURRENT_CNT,%d,dppclk\n",
				internal.CLK1_CLK1_CURRENT_CNT);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass\n",
				internal.CLK1_CLK3_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass\n",
				internal.CLK1_CLK2_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass\n",
				internal.CLK1_CLK0_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;

		chars_printed = snprintf_count(log_info->pBuf, remaining_buffer, "CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass\n",
				internal.CLK1_CLK1_BYPASS_CNTL);
		remaining_buffer -= chars_printed;
		*log_info->sum_chars_printed += chars_printed;
		log_info->pBuf += chars_printed;
	}
}

/* Forward the PME (power management event) workaround request to the SMU. */
static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn301_smu_enable_pme_wa(clk_mgr);
}

/* Reset cached clock state to the assumed boot configuration. */
static void vg_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}

/*
 * Translate the valid entries of bw_params->wm_table into the SMU watermark
 * table, packing out invalid entries and fixing up min/max boundaries so the
 * DCFCLK ranges tile the whole [0, 0xFFFF] space without overlap.
 */
static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes*/
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;

		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range*/
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only, does not matter currently as no writeback support*/
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}

/*
 * Build the watermark table in the pre-allocated GPU (DRAM) buffer and ask
 * the SMU to pull it in.  No-op if the SMU is absent or the buffer was not
 * allocated (mc_address of 0 marks the dummy fallback table).
 */
static void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_vgh *clk_mgr_vgh = TO_CLK_MGR_VGH(clk_mgr);
	struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	vg_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn301_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_vgh->smu_wm_set.mc_address.high_part);
	dcn301_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_vgh->smu_wm_set.mc_address.low_part);
	dcn301_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}

/*
 * Compare only the clock fields this clk_mgr actually programs
 * (dispclk, dppclk, dcfclk, deep-sleep dcfclk).
 */
static bool vg_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	if (a->dispclk_khz != b->dispclk_khz)
		return false;
	else if (a->dppclk_khz != b->dppclk_khz)
		return false;
	else if (a->dcfclk_khz != b->dcfclk_khz)
		return false;
	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
		return false;

	return true;
}


/* Non-const: update_clocks is patched at construct time for FPGA environments. */
static struct clk_mgr_funcs vg_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = vg_update_clocks,
	.init_clocks = vg_init_clocks,
	.enable_pme_wa = vg_enable_pme_wa,
	.are_clock_states_equal = vg_are_clock_states_equal,
	.notify_wm_ranges = vg_notify_wm_ranges
};

/* Fallback bandwidth parameters, overwritten from BIOS/SMU data when available. */
static struct clk_bw_params vg_bw_params = {
	.vram_type = Ddr4MemType,
	.num_channels = 1,
	.clk_table = {
		.entries = {
			{
				.voltage = 0,
				.dcfclk_mhz = 400,
				.fclk_mhz = 400,
				.memclk_mhz = 800,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 483,
				.fclk_mhz = 800,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 602,
				.fclk_mhz = 1067,
				.memclk_mhz = 1067,
				.socclk_mhz = 0,
			},
			{
				.voltage = 0,
				.dcfclk_mhz = 738,
				.fclk_mhz = 1333,
				.memclk_mhz = 1600,
				.socclk_mhz = 0,
			},
		},

		.num_entries = 4,
	},

};

/* Watermark latencies (microseconds) for DDR4 memory. */
static struct wm_table ddr4_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 6.09,
			.sr_enter_plus_exit_time_us = 7.14,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
	}
};

/* Watermark latencies (microseconds) for LPDDR5 memory. */
static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 13.5,
			.sr_enter_plus_exit_time_us = 16.5,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 13.5,
			.sr_enter_plus_exit_time_us = 16.5,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 13.5,
			.sr_enter_plus_exit_time_us = 16.5,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 13.5,
			.sr_enter_plus_exit_time_us = 16.5,
			.valid = true,
		},
	}
};


/*
 * Return the DCFCLK entry whose SoC voltage level exactly matches @voltage,
 * or 0 (with an assert) if no level matches.
 */
static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
		unsigned int voltage)
{
	int i;

	for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage)
			return clock_table->DcfClocks[i];
	}

	ASSERT(0);
	return 0;
}

/*
 * Populate bw_params from the SMU DPM clock table and BIOS integrated info.
 * The DF p-state table is stored highest-first, so it is walked in reverse
 * to produce an ascending clk_table; WM set D is re-purposed for memory
 * retraining on LPDDR4.
 */
static void vg_clk_mgr_helper_populate_bw_params(
		struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const struct vg_dpm_clocks *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;

	j = -1;

	ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find lowest DPM, FCLK is filled in reverse order*/

	for (i = VG_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].fclk != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
	}

	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		/* only one WM set per populated clock level */
		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}

	if (bw_params->vram_type == LpDdr4MemType) {
		/*
		 * WM set D will be re-purposed for memory retraining
		 */
		bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
		bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
		bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
		bw_params->wm_table.entries[WM_D].valid = true;
	}

}

/* Temporary Place holder until we can get them from fuse */
static struct vg_dpm_clocks dummy_clocks = {
		.DcfClocks = { 201, 403, 403, 403, 403, 403, 403 },
		.SocClocks = { 400, 600, 600, 600, 600, 600, 600 },
		.SocVoltage = { 2800, 2860, 2860, 2860, 2860, 2860, 2860, 2860 },
		.DfPstateTable = {
				{ .fclk = 400,  .memclk = 400, .voltage = 2800 },
				{ .fclk = 400,  .memclk = 400, .voltage = 2800 },
				{ .fclk = 400,  .memclk = 400, .voltage = 2800 },
				{ .fclk = 400,  .memclk = 400, .voltage = 2800 }
		}
};

/* Fallback watermark buffer used when GPU memory allocation fails. */
static struct watermarks dummy_wms = { 0 };

/*
 * Ask the SMU to write its DPM clock table into the provided DRAM buffer.
 * No-op without a SMU or a real buffer (mc_address of 0 marks the dummy).
 */
static void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct smu_dpm_clks *smu_dpm_clks)
{
	struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn301_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn301_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn301_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

/*
 * Initialize the Vangogh clock manager: set defaults, allocate the SMU
 * watermark and DPM-table DRAM buffers (falling back to static dummies on
 * failure), read the VCO frequency and boot clock snapshot from registers,
 * select the DDR4 vs LPDDR5 watermark table, and populate bw_params from
 * the SMU DPM table.  The temporary DPM buffer is freed before returning.
 */
void vg_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_vgh *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct smu_dpm_clks smu_dpm_clks = { 0 };

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &vg_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;
	clk_mgr->base.dfs_ref_freq_khz = 48000;

	clk_mgr->smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (!clk_mgr->smu_wm_set.wm_set) {
		/* mc_address of 0 flags the dummy so later paths skip the SMU transfer */
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (struct vg_dpm_clocks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct vg_dpm_clocks),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}

	ASSERT(smu_dpm_clks.dpm_clks);

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		vg_funcs.update_clocks = dcn2_update_clocks_fpga;
		clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
	} else {
		struct clk_log_info log_info = {0};

		clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);

		if (clk_mgr->base.smu_ver)
			clk_mgr->base.smu_present = true;

		/* TODO: Check we get what we expect during bringup */
		clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

		/* in case we don't get a value from the register, use default */
		if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
			clk_mgr->base.base.dentist_vco_freq_khz = 3600000;

		if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
			vg_bw_params.wm_table = lpddr5_wm_table;
		} else {
			vg_bw_params.wm_table = ddr4_wm_table;
		}
		/* Saved clocks configured at boot for debug purposes */
		vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
	}

	clk_mgr->base.base.dprefclk_khz = 600000;
	dce_clock_read_ss_info(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &vg_bw_params;

	vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
	if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
		vg_clk_mgr_helper_populate_bw_params(
				&clk_mgr->base,
				ctx->dc_bios->integrated_info,
				smu_dpm_clks.dpm_clks);
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				smu_dpm_clks.dpm_clks);
/*
	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) {
		 enable powerfeatures when displaycount goes to 0
		dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
	}
*/
}

/*
 * Free the SMU watermark buffer allocated in vg_clk_mgr_construct
 * (skipped when the static dummy table is in use, mc_address == 0).
 */
void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_vgh *clk_mgr = TO_CLK_MGR_VGH(clk_mgr_int);

	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				clk_mgr->smu_wm_set.wm_set);
}