/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dce_clk_mgr.h"

#include "reg_helper.h"
#include "dmcu.h"
#include "core_types.h"
#include "dal_asic_id.h"

#define TO_DCE_CLK_MGR(clocks)\
	container_of(clocks, struct dce_clk_mgr, base)

#define REG(reg) \
	(clk_mgr_dce->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name

#define CTX \
	clk_mgr_dce->base.ctx
#define DC_LOGGER \
	clk_mgr->ctx->logger

/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently by HW design team not supposed to be used */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateNominal */
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently by HW design team not supposed to be used */
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/* ClocksStateLow */
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/* ClocksStateNominal */
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };

static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently by HW design team not supposed to be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/* ClocksStateNominal */
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };

int dentist_get_divider_from_did(int did)
{
	if (did < DENTIST_BASE_DID_1)
		did = DENTIST_BASE_DID_1;
	if (did > DENTIST_MAX_DID)
		did = DENTIST_MAX_DID;

	if (did < DENTIST_BASE_DID_2) {
		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
							* (did - DENTIST_BASE_DID_1);
	} else if (did < DENTIST_BASE_DID_3) {
		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
							* (did - DENTIST_BASE_DID_2);
	} else if (did < DENTIST_BASE_DID_4) {
		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
							* (did - DENTIST_BASE_DID_3);
	} else {
		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
							* (did - DENTIST_BASE_DID_4);
	}
}

/* SW will adjust the DP REF Clock average value for all purposes
 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled with
 *   SW calculations for DS_INCR/DS_MODULO (this is planned to be the default
 *   case)
 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled with
 *   HW calculations (not planned to be used, but the average clock should
 *   still be valid)
 * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
 *   (should not be the case with CIK), then SW should program all rates
 *   generated according to the average value (as with previous ASICs)
 */
static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
{
	if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
				dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
							clk_mgr_dce->dprefclk_ss_divider), 200);
		struct fixed31_32 adj_dp_ref_clk_khz;

		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
	}
	return dp_ref_clk_khz;
}
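
/* Illustrative sanity check of the adjustment above (example values only, not
 * read from any particular VBIOS): a downspread entry reported as
 * spread_spectrum_percentage = 30 with spread_percentage_divider = 100 encodes
 * a 0.30% spread. The average clock is the nominal clock minus half the
 * spread, i.e. 600000 kHz * (1 - 30 / 100 / 200) = 600000 * 0.9985 =
 * 599100 kHz.
 */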
static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int dprefclk_wdivider;
	int dprefclk_src_sel;
	int dp_ref_clk_khz = 600000;
	int target_div;

	/* ASSERT that the DP Reference Clock source is from DFS */
	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
	ASSERT(dprefclk_src_sel == 0);

	/* Read mmDENTIST_DISPCLK_CNTL to get the currently
	 * programmed DID DENTIST_DPREFCLK_WDIVIDER.
	 */
	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

	/* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
	target_div = dentist_get_divider_from_did(dprefclk_wdivider);

	/* Calculate the current DFS clock, in kHz. */
	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
		* clk_mgr_dce->dentist_vco_freq_khz) / target_div;

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
}

int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
}

/* Unit: kHz. Before a mode set the pixel clock is taken from the context,
 * since the ASIC registers may not be programmed yet.
 */
static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
{
	uint32_t max_pix_clk = 0;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream == NULL)
			continue;

		/* do not check underlay pipes */
		if (pipe_ctx->top_pipe)
			continue;

		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;

		/* Raise the clock state for HBR3/HBR2 if required; per HW confirmation,
		 * the DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
		 */
		if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
				pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
	}

	return max_pix_clk;
}

static enum dm_pp_clocks_state dce_get_required_clocks_state(
	struct clk_mgr *clk_mgr,
	struct dc_state *context)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int i;
	enum dm_pp_clocks_state low_req_clk;
	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);

	/* Iterate from the highest supported to the lowest valid state, and update
	 * the lowest RequiredState with the lowest state that satisfies
	 * all required clocks.
	 */
	for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
		if (context->bw_ctx.bw.dce.dispclk_khz >
				clk_mgr_dce->max_clks_by_state[i].display_clk_khz
			|| max_pix_clk >
				clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
			break;

	low_req_clk = i + 1;
	if (low_req_clk > clk_mgr_dce->max_clks_state) {
		/* Use the max clock state for a high phyclk; flag the state invalid
		 * if even the max state cannot cover the required display clock.
		 */
		if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
				< context->bw_ctx.bw.dce.dispclk_khz)
			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
		else
			low_req_clk = clk_mgr_dce->max_clks_state;
	}

	return low_req_clk;
}
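
/* Illustrative example of the state selection above (assumed request values,
 * using the DCE 11.0 table): with a required dispclk of 500000 kHz and a max
 * pixel clock of 350000 kHz, ClocksStatePerformance (643000/400000) satisfies
 * both, while ClocksStateNominal (467000/400000) does not cover the dispclk,
 * so the loop breaks at Nominal and Performance is returned as the lowest
 * sufficient state.
 */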
static int dce_set_clock(
	struct clk_mgr *clk_mgr,
	int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	int actual_clock = requested_clk_khz;
	struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;

	/* Make sure the requested clock isn't lower than the minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 64);

	/* Prepare to program the display clock */
	pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	if (clk_mgr_dce->dfs_bypass_active)
		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_mgr_dce->dfs_bypass_active) {
		/* Cache the fixed display clock */
		clk_mgr_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;
		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
	}

	/* Coming out of power down we need to mark the clock state as
	 * ClocksStateNominal (as after HW reset), so that on resume we will
	 * call the pplib voltage regulator.
	 */
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);

	return actual_clock;
}

int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_set_dce_clock_parameters dce_clk_params;
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	struct dc *core_dc = clk_mgr->ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	int actual_clock = requested_clk_khz;

	/* Prepare to program the display clock */
	memset(&dce_clk_params, 0, sizeof(dce_clk_params));

	/* Make sure the requested clock isn't lower than the minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 62);

	dce_clk_params.target_clock_frequency = requested_clk_khz;
	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
	actual_clock = dce_clk_params.target_clock_frequency;

	/* Coming out of power down we need to mark the clock state as
	 * ClocksStateNominal (as after HW reset), so that on resume we will
	 * call the pplib voltage regulator.
	 */
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/* Program the DP ref clock. VBIOS will determine the DPREFCLK
	 * frequency, so we don't set it here.
	 */
	dce_clk_params.target_clock_frequency = 0;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
	if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
			(dce_clk_params.pll_id ==
					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
	else
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
				dmcu->funcs->set_psr_wait_loop(dmcu,
						actual_clock / 1000 / 7);
		}
	}

	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
	return actual_clock;
}
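
/* Note on the minimum-clock clamps above (illustrative arithmetic only): the
 * helper below falls back to a 3.6 GHz DENTIST VCO when neither the integrated
 * info table nor the firmware info reports one. With that fallback the
 * dce_set_clock() floor works out to 3600000 / 64 = 56250 kHz (and
 * 3600000 / 62 ~= 58064 kHz for the DCE 11.2 path).
 */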
static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_mgr_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
	}

	/* Update the maximum display clock for each power state */
	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

		switch (i) {
		case 0:
			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
			break;

		case 1:
			clk_state = DM_PP_CLOCKS_STATE_LOW;
			break;

		case 2:
			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
			break;

		case 3:
			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
			break;

		default:
			clk_state = DM_PP_CLOCKS_STATE_INVALID;
			break;
		}

		/* Do not allow a bad VBIOS/SBIOS to override with invalid values;
		 * check for > 100 MHz.
		 */
		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
				info.disp_clk_voltage[i].max_supported_clk;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_mgr_dce->dfs_bypass_enabled = true;
}

void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	int ss_info_num = bp->funcs->get_ss_entry_number(
			bp, AS_SIGNAL_TYPE_GPU_PLL);

	if (ss_info_num) {
		struct spread_spectrum_info info = { { 0 } };
		enum bp_result result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

		/* VBIOS keeps an entry for GPU PLL SS even if SS is not enabled;
		 * a non-zero SSInfo.spreadSpectrumPercentage is what indicates
		 * that SS is actually enabled.
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* TODO: Currently for the DP reference clock we only
				 * need the SS percentage for downspread.
				 */
				clk_mgr_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}

			return;
		}

		result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

		/* VBIOS keeps an entry for DPREFCLK SS even if SS is not enabled;
		 * a non-zero SSInfo.spreadSpectrumPercentage is what indicates
		 * that SS is actually enabled.
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* Currently for the DP reference clock we only
				 * need the SS percentage for downspread.
				 */
				clk_mgr_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}
		}
	}
}

/**
 * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
 * @clk_mgr: clock manager base structure
 *
 * Reads the XGMI spread spectrum info from VBIOS and saves it within
 * the dce clock manager. This operation will overwrite the existing dprefclk
 * SS values if the VBIOS query succeeds. Otherwise, it does nothing. It also
 * sets the ->xgmi_enabled flag.
 */
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	enum bp_result result;
	struct spread_spectrum_info info = { { 0 } };
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;

	clk_mgr_dce->xgmi_enabled = false;

	result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
						     0, &info);
	if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
		clk_mgr_dce->xgmi_enabled = true;
		clk_mgr_dce->ss_on_dprefclk = true;
		clk_mgr_dce->dprefclk_ss_divider =
				info.spread_percentage_divider;

		if (info.type.CENTER_MODE == 0) {
			/* Currently for the DP reference clock we only
			 * need the SS percentage for downspread.
			 */
			clk_mgr_dce->dprefclk_ss_percentage =
					info.spread_spectrum_percentage;
		}
	}
}

void dce110_fill_display_configs(
	const struct dc_state *context,
	struct dm_pp_display_configuration *pp_display_cfg)
{
	int j;
	int num_cfgs = 0;

	for (j = 0; j < context->stream_count; j++) {
		int k;

		const struct dc_stream_state *stream = context->streams[j];
		struct dm_pp_single_disp_config *cfg =
			&pp_display_cfg->disp_configs[num_cfgs];
		const struct pipe_ctx *pipe_ctx = NULL;

		for (k = 0; k < MAX_PIPES; k++)
			if (stream == context->res_ctx.pipe_ctx[k].stream) {
				pipe_ctx = &context->res_ctx.pipe_ctx[k];
				break;
			}

		ASSERT(pipe_ctx != NULL);

		/* only notify active streams */
		if (stream->dpms_off)
			continue;

		num_cfgs++;
		cfg->signal = pipe_ctx->stream->signal;
		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
		cfg->src_height = stream->src.height;
		cfg->src_width = stream->src.width;
		cfg->ddi_channel_mapping =
			stream->link->ddi_channel_mapping.raw;
		cfg->transmitter =
			stream->link->link_enc->transmitter;
		cfg->link_settings.lane_count =
			stream->link->cur_link_settings.lane_count;
		cfg->link_settings.link_rate =
			stream->link->cur_link_settings.link_rate;
		cfg->link_settings.link_spread =
			stream->link->cur_link_settings.link_spread;
		cfg->sym_clock = stream->phy_pix_clk;
		/* Round v_refresh */
		cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
		cfg->v_refresh /= stream->timing.h_total;
		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
							/ stream->timing.v_total;
	}

	pp_display_cfg->display_count = num_cfgs;
}
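
/* The helper below reports the smallest vertical-blank time across all
 * streams, which pplib uses as the available mclk switch window. Illustrative
 * arithmetic (assumed CEA 1080p60 timing, not taken from this file): with
 * h_total 2200, v_total 1125, v_addressable 1080 and a 148.5 MHz pixel clock,
 * 2200 * (1125 - 1080) * 10000 / 1485000 = 666 us (integer division).
 */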
static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
{
	uint8_t j;
	uint32_t min_vertical_blank_time = -1;

	for (j = 0; j < context->stream_count; j++) {
		struct dc_stream_state *stream = context->streams[j];
		uint32_t vertical_blank_in_pixels = 0;
		uint32_t vertical_blank_time = 0;

		vertical_blank_in_pixels = stream->timing.h_total *
			(stream->timing.v_total
			 - stream->timing.v_addressable);

		vertical_blank_time = vertical_blank_in_pixels
			* 10000 / stream->timing.pix_clk_100hz;

		if (min_vertical_blank_time > vertical_blank_time)
			min_vertical_blank_time = vertical_blank_time;
	}

	return min_vertical_blank_time;
}
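
/* Illustrative example of the bounding-box lookup below (assumed level table,
 * not taken from this file): with sclk levels {200000, 400000, 600000} kHz and
 * a required sclk of 300000 kHz, the first level that covers the requirement
 * is returned, i.e. 400000 kHz.
 */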
static int determine_sclk_from_bounding_box(
		const struct dc *dc,
		int required_sclk)
{
	int i;

	/*
	 * Some ASICs do not give us sclk levels, so we just report the actual
	 * required sclk.
	 */
	if (dc->sclk_lvls.num_levels == 0)
		return required_sclk;

	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
			return dc->sclk_lvls.clocks_in_khz[i];
	}
	/*
	 * Even the maximum level could not satisfy the requirement; this is
	 * unexpected at this stage and should have been caught at
	 * validation time.
	 */
	ASSERT(0);
	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}

static void dce_pplib_apply_display_requirements(
		struct dc *dc,
		struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);

	dce110_fill_display_configs(context, pp_display_cfg);

	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}

static void dce11_pplib_apply_display_requirements(
		struct dc *dc,
		struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	pp_display_cfg->all_displays_in_sync =
		context->bw_ctx.bw.dce.all_displays_in_sync;
	pp_display_cfg->nb_pstate_switch_disable =
		context->bw_ctx.bw.dce.nbp_state_change_enable == false;
	pp_display_cfg->cpu_cc6_disable =
		context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
	pp_display_cfg->cpu_pstate_disable =
		context->bw_ctx.bw.dce.cpup_state_change_enable == false;
	pp_display_cfg->cpu_pstate_separation_time =
		context->bw_ctx.bw.dce.blackout_recovery_time_us;

	pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
		/ MEMORY_TYPE_MULTIPLIER_CZ;

	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
			dc,
			context->bw_ctx.bw.dce.sclk_khz);

	/*
	 * As a workaround for the >4x4K light-up case, set dcfclk to the
	 * min_engine_clock value. This is not required for fewer than five
	 * displays, so don't request dcfclk in dc for those configurations to
	 * avoid impacting power saving.
	 */
	pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
			pp_display_cfg->min_engine_clock_khz : 0;

	pp_display_cfg->min_engine_clock_deep_sleep_khz
		= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;

	pp_display_cfg->avail_mclk_switch_time_us =
						dce110_get_min_vblank_time_us(context);
	/* TODO: dce11.2 */
	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;

	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;

	dce110_fill_display_configs(context, pp_display_cfg);

	/* TODO: is this still applicable? */
	if (pp_display_cfg->display_count == 1) {
		const struct dc_crtc_timing *timing =
			&context->streams[0]->timing;

		pp_display_cfg->crtc_index =
			pp_display_cfg->disp_configs[0].pipe_idx;
		pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
	}

	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
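
/* Illustrative sanity check of the DFS-bypass workaround applied in the
 * update_clocks variants below (assumed request value): when DFS bypass is not
 * active, a 400000 kHz dispclk request is padded to 400000 * 115 / 100 =
 * 460000 kHz before being programmed.
 */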
static void dce_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static void dce11_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static void dce112_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static void dce12_update_clocks(struct clk_mgr *clk_mgr,
			struct dc_state *context,
			bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
		/*
		 * When xGMI is enabled, the display clk needs to be adjusted
		 * with the WAFL link's SS percentage.
		 */
		if (clk_mgr_dce->xgmi_enabled)
			patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss(
					clk_mgr_dce, patched_disp_clk);
		clock_voltage_req.clocks_in_khz = patched_disp_clk;
		clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk);

		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
	}

	if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
		clock_voltage_req.clocks_in_khz = max_pix_clk;
		clk_mgr->clks.phyclk_khz = max_pix_clk;

		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static const struct clk_mgr_funcs dce120_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dce12_update_clocks
};

static const struct clk_mgr_funcs dce112_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce112_update_clocks
};

static const struct clk_mgr_funcs dce110_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce11_update_clocks,
};

static const struct clk_mgr_funcs dce_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce_update_clocks
};

static void dce_clk_mgr_construct(
	struct dce_clk_mgr *clk_mgr_dce,
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct clk_mgr *base = &clk_mgr_dce->base;
	struct dm_pp_static_clock_info static_clk_info = {0};

	base->ctx = ctx;
	base->funcs = &dce_funcs;

	clk_mgr_dce->regs = regs;
	clk_mgr_dce->clk_mgr_shift = clk_shift;
	clk_mgr_dce->clk_mgr_mask = clk_mask;

	clk_mgr_dce->dfs_bypass_disp_clk = 0;

	clk_mgr_dce->dprefclk_ss_percentage = 0;
	clk_mgr_dce->dprefclk_ss_divider = 1000;
	clk_mgr_dce->ss_on_dprefclk = false;

	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
		clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
	else
		clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
	clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;

	dce_clock_read_integrated_info(clk_mgr_dce);
	dce_clock_read_ss_info(clk_mgr_dce);
}

struct clk_mgr *dce_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce80_max_clks_by_state,
		sizeof(dce80_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce110_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce110_max_clks_by_state,
		sizeof(dce110_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	clk_mgr_dce->base.funcs = &dce110_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce112_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce112_max_clks_by_state,
		sizeof(dce112_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	clk_mgr_dce->base.funcs = &dce112_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce120_max_clks_by_state,
		sizeof(dce120_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, NULL, NULL, NULL);

	clk_mgr_dce->dprefclk_khz = 600000;
	clk_mgr_dce->base.funcs = &dce120_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce),
						  GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state,
	       sizeof(dce120_max_clks_by_state));

	dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL);

	clk_mgr_dce->dprefclk_khz = 625000;
	clk_mgr_dce->base.funcs = &dce120_funcs;

	return &clk_mgr_dce->base;
}

void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);

	kfree(clk_mgr_dce);
	*clk_mgr = NULL;
}