/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */


#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dcn30_hwseq.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "dcn30_mpc.h"
#include "dcn30_dpp.h"
#include "dcn10/dcn10_cm_common.h"
#include "dcn30_cm_common.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "link_hwss.h"
#include "dpcd_defs.h"


#define DC_LOGGER_INIT(logger)

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc->ctx->logger


#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

bool dcn30_set_blend_lut(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	bool result = true;
	struct pwl_params *blend_lut = NULL;

	if (plane_state->blend_tf) {
		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
			blend_lut = &plane_state->blend_tf->pwl;
		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm3_helper_translate_curve_to_hw_format(
					plane_state->blend_tf, &dpp_base->regamma_params, false);
			blend_lut = &dpp_base->regamma_params;
		}
	}
	result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);

	return result;
}

static bool dcn30_set_mpc_shaper_3dlut(
	struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	bool result = false;
	int acquired_rmu = 0;
	int mpcc_id_projected = 0;

	const struct pwl_params *shaper_lut = NULL;
	/* get the shaper LUT params */
	if (stream->func_shaper) {
		if (stream->func_shaper->type == TF_TYPE_HWPWL)
			shaper_lut = &stream->func_shaper->pwl;
		else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_hw_format(
					stream->func_shaper,
					&dpp_base->shaper_params, true);
			shaper_lut = &dpp_base->shaper_params;
		}
	}

	if (stream->lut3d_func &&
		stream->lut3d_func->state.bits.initialized == 1 &&
		stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
		if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
		else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu1_mux;
		else if (stream->lut3d_func->state.bits.rmu_mux_num == 2)
			mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
		if (mpcc_id_projected != mpcc_id)
			BREAK_TO_DEBUGGER();
		/* find why the logical layer assigned a different mpcc_id in acquire_post_bldn_3dlut */
		acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
				stream->lut3d_func->state.bits.rmu_mux_num);
		if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
			BREAK_TO_DEBUGGER();
		result = mpc->funcs->program_3dlut(mpc,
				&stream->lut3d_func->lut_3d,
				stream->lut3d_func->state.bits.rmu_mux_num);
		result = mpc->funcs->program_shaper(mpc, shaper_lut,
				stream->lut3d_func->state.bits.rmu_mux_num);
	} else
		/* loop through the available mux and release the requested mpcc_id */
		mpc->funcs->release_rmu(mpc, mpcc_id);


	return result;
}

bool dcn30_set_input_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	enum dc_transfer_func_predefined tf;
	bool result = true;
	struct pwl_params *params = NULL;

	if (dpp_base == NULL || plane_state == NULL)
		return false;

	tf = TRANSFER_FUNCTION_UNITY;

	if (plane_state->in_transfer_func &&
		plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED)
		tf = plane_state->in_transfer_func->tf;

	dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf);

	if (plane_state->in_transfer_func) {
		if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL)
			params = &plane_state->in_transfer_func->pwl;
		else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS &&
			cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func,
					&dpp_base->degamma_params, false))
			params = &dpp_base->degamma_params;
	}

	result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);

	if (pipe_ctx->stream_res.opp && pipe_ctx->stream_res.opp->ctx) {
		if (dpp_base->funcs->dpp_program_blnd_lut)
			hws->funcs.set_blend_lut(pipe_ctx, plane_state);
		if (dpp_base->funcs->dpp_program_shaper_lut &&
			dpp_base->funcs->dpp_program_3dlut)
			hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
	}

	return result;
}

bool dcn30_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe */
	if (pipe_ctx->top_pipe == NULL) {
		/* program RMU shaper and 3DLUT in MPC */
		ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) {
			if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func->pwl;
			else if (pipe_ctx->stream->out_transfer_func->type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(
					stream->out_transfer_func,
					&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
	return ret;
}

static void dcn30_set_writeback(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;
	struct mcif_buf_params *mcif_buf_params;

	ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
	ASSERT(wb_info->wb_enabled);
	ASSERT(wb_info->mpcc_inst >= 0);
	ASSERT(wb_info->mpcc_inst < 4);
	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
	mcif_buf_params = &wb_info->mcif_buf_params;

	/* set DWB MPC mux */
	dc->res_pool->mpc->funcs->set_dwb_mux(dc->res_pool->mpc,
			wb_info->dwb_pipe_inst, wb_info->mpcc_inst);
	/* set MCIF_WB buffer and arbitration configuration */
	mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, wb_info->dwb_params.dest_height);
	mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
}

void dcn30_update_writeback(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context)
{
	struct dwbc *dwb;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",
		__func__, wb_info->dwb_pipe_inst,
		wb_info->mpcc_inst);

	dcn30_set_writeback(dc, wb_info, context);

	/* update DWB */
	dwb->funcs->update(dwb, &wb_info->dwb_params);
}

bool dcn30_mmhubbub_warmup(
	struct dc *dc,
	unsigned int num_dwb,
	struct dc_writeback_info *wb_info)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;
	struct mcif_warmup_params warmup_params = {0};
	unsigned int i, i_buf;

	/* make sure there is no active DWB enabled */
	for (i = 0; i < num_dwb; i++) {
		dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
		if (dwb->dwb_is_efc_transition || dwb->dwb_is_drc) {
			/* cannot do warmup while any DWB is enabled */
			return false;
		}
	}

	if (wb_info->mcif_warmup_params.p_vmid == 0)
		return false;

	/* check whether this is the new interface: warm up the big buffer once */
	if (wb_info->mcif_warmup_params.start_address.quad_part != 0 &&
		wb_info->mcif_warmup_params.region_size != 0) {
		/* mmhubbub is shared, so it does not matter which MCIF is used */
		mcif_wb = dc->res_pool->mcif_wb[0];
		/* warm up a big chunk of the VM buffer at once */
		warmup_params.start_address.quad_part = wb_info->mcif_warmup_params.start_address.quad_part;
		warmup_params.address_increment = wb_info->mcif_warmup_params.region_size;
		warmup_params.region_size = wb_info->mcif_warmup_params.region_size;
		warmup_params.p_vmid = wb_info->mcif_warmup_params.p_vmid;

		if (warmup_params.address_increment == 0)
			warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;

		mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
		return true;
	}
	/* original path: warm up each DWB's MCIF buffer */
	for (i = 0; i < num_dwb; i++) {
		dwb = dc->res_pool->dwbc[wb_info[i].dwb_pipe_inst];
		mcif_wb = dc->res_pool->mcif_wb[wb_info[i].dwb_pipe_inst];
		/* warmup is for VM mode only */
		if (wb_info[i].mcif_buf_params.p_vmid == 0)
			return false;

		/* Warmup MCIF_WB */
		for (i_buf = 0; i_buf < MCIF_BUF_COUNT; i_buf++) {
			warmup_params.start_address.quad_part = wb_info[i].mcif_buf_params.luma_address[i_buf];
			warmup_params.address_increment = dc->dml.soc.vmm_page_size_bytes;
			warmup_params.region_size = wb_info[i].mcif_buf_params.luma_pitch * wb_info[i].dwb_params.dest_height;
			warmup_params.p_vmid = wb_info[i].mcif_buf_params.p_vmid;
			mcif_wb->funcs->warmup_mcif(mcif_wb, &warmup_params);
		}
	}
	return true;
}

void dcn30_enable_writeback(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;
	struct timing_generator *optc;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* set the OPTC source mux */
	optc = dc->res_pool->timing_generators[dwb->otg_inst];
	DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",
		__func__, wb_info->dwb_pipe_inst,
		wb_info->mpcc_inst);
	if (IS_DIAG_DC(dc->ctx->dce_environment)) {
		/* till diags switch to warmup interface */
		dcn30_mmhubbub_warmup(dc, 1, wb_info);
	}
	/* Update writeback pipe */
	dcn30_set_writeback(dc, wb_info, context);

	/* Enable MCIF_WB */
	mcif_wb->funcs->enable_mcif(mcif_wb);
	/* Enable DWB */
	dwb->funcs->enable(dwb, &wb_info->dwb_params);
}

void dcn30_disable_writeback(
		struct dc *dc,
		unsigned int dwb_pipe_inst)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
	dwb = dc->res_pool->dwbc[dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];
	DC_LOG_DWB("%s dwb_pipe_inst = %d",
		__func__, dwb_pipe_inst);

	/* disable DWB */
	dwb->funcs->disable(dwb);
	/* disable MCIF */
	mcif_wb->funcs->disable_mcif(mcif_wb);
	/* disable MPC DWB mux */
	dc->res_pool->mpc->funcs->disable_dwb_mux(dc->res_pool->mpc, dwb_pipe_inst);
}

void dcn30_program_all_writeback_pipes_in_tree(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct dc_state *context)
{
	struct dc_writeback_info wb_info;
	struct dwbc *dwb;
	struct dc_stream_status *stream_status = NULL;
	int i_wb, i_pipe, i_stream;

	DC_LOG_DWB("%s", __func__);

	ASSERT(stream);
	for (i_stream = 0; i_stream < context->stream_count; i_stream++) {
		if (context->streams[i_stream] == stream) {
			stream_status = &context->stream_status[i_stream];
			break;
		}
	}
	ASSERT(stream_status);

	ASSERT(stream->num_wb_info <= dc->res_pool->res_cap->num_dwb);
	/* For each writeback pipe */
	for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {

		/* copy writeback info to local non-const so mpcc_inst can be set */
		wb_info = stream->writeback_info[i_wb];
		if (wb_info.wb_enabled) {

			/* get the MPCC instance for writeback_source_plane */
			wb_info.mpcc_inst = -1;
			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];

				if (pipe_ctx->plane_state == wb_info.writeback_source_plane) {
					wb_info.mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
					break;
				}
			}
			ASSERT(wb_info.mpcc_inst != -1);

			ASSERT(wb_info.dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
			dwb = dc->res_pool->dwbc[wb_info.dwb_pipe_inst];
			if (dwb->funcs->is_enabled(dwb)) {
				/* writeback pipe already enabled, only need to update */
				dc->hwss.update_writeback(dc, &wb_info, context);
			} else {
				/* Enable writeback pipe and connect to MPCC */
				dc->hwss.enable_writeback(dc, &wb_info, context);
			}
		} else {
			/* Disable writeback pipe and disconnect from MPCC */
			dc->hwss.disable_writeback(dc, wb_info.dwb_pipe_inst);
		}
	}
}

void dcn30_init_hw(struct dc *dc)
{
	int i, j;
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {

		REG_WRITE(REFCLK_CNTL, 0);
		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

		if (!dc->debug.disable_clock_gate) {
			/* enable all DCN clock gating */
			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
		}

		// Enable ability to power gate / don't force power on permanently
		if (hws->funcs.enable_power_gating_plane)
			hws->funcs.enable_power_gating_plane(hws, true);

		return;
	}

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		hws->funcs.bios_golden_init(dc);
		hws->funcs.disable_vga(dc->hwseq);
	}

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
			if (res_pool->dccg && res_pool->hubbub) {

				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
						&res_pool->ref_clocks.dccg_ref_clock_inKhz);

				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
						res_pool->ref_clocks.dccg_ref_clock_inKhz,
						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
			} else {
				// Not all ASICs have DCCG sw component
				res_pool->ref_clocks.dccg_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
				res_pool->ref_clocks.dchub_ref_clock_inKhz =
						res_pool->ref_clocks.xtalin_clock_inKhz;
			}
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
497 */ 498 struct dc_link *link = dc->links[i]; 499 500 link->link_enc->funcs->hw_init(link->link_enc); 501 502 /* Check for enabled DIG to identify enabled display */ 503 if (link->link_enc->funcs->is_dig_enabled && 504 link->link_enc->funcs->is_dig_enabled(link->link_enc)) 505 link->link_status.link_active = true; 506 } 507 508 /* Power gate DSCs */ 509 for (i = 0; i < res_pool->res_cap->num_dsc; i++) 510 if (hws->funcs.dsc_pg_control != NULL) 511 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); 512 513 /* we want to turn off all dp displays before doing detection */ 514 if (dc->config.power_down_display_on_boot) { 515 uint8_t dpcd_power_state = '\0'; 516 enum dc_status status = DC_ERROR_UNEXPECTED; 517 518 for (i = 0; i < dc->link_count; i++) { 519 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) 520 continue; 521 522 /* if any of the displays are lit up turn them off */ 523 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, 524 &dpcd_power_state, sizeof(dpcd_power_state)); 525 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { 526 /* blank dp stream before power off receiver*/ 527 if (dc->links[i]->link_enc->funcs->get_dig_frontend) { 528 unsigned int fe; 529 530 fe = dc->links[i]->link_enc->funcs->get_dig_frontend( 531 dc->links[i]->link_enc); 532 533 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 534 if (fe == dc->res_pool->stream_enc[j]->id) { 535 dc->res_pool->stream_enc[j]->funcs->dp_blank( 536 dc->res_pool->stream_enc[j]); 537 break; 538 } 539 } 540 } 541 dp_receiver_power_ctrl(dc->links[i], false); 542 } 543 } 544 } 545 546 /* If taking control over from VBIOS, we may want to optimize our first 547 * mode set, so we need to skip powering down pipes until we know which 548 * pipes we want to use. 549 * Otherwise, if taking control is not possible, we need to power 550 * everything down. 551 */ 552 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { 553 hws->funcs.init_pipes(dc, dc->current_state); 554 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) 555 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, 556 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); 557 } 558 559 /* In headless boot cases, DIG may be turned 560 * on which causes HW/SW discrepancies. 
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (dc->config.power_down_display_on_boot) {
		struct dc_link *edp_link = get_edp_link(dc);

		if (edp_link &&
				edp_link->link_enc->funcs->is_dig_enabled &&
				edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
				dc->hwss.edp_backlight_control &&
				dc->hwss.power_down &&
				dc->hwss.edp_power_control) {
			dc->hwss.edp_backlight_control(edp_link, false);
			dc->hwss.power_down(dc);
			dc->hwss.edp_power_control(edp_link, false);
		} else {
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						dc->hwss.power_down) {
					dc->hwss.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl)
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power */
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
{
	if (pipe_ctx == NULL)
		return;

	if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
		pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
				pipe_ctx->stream_res.stream_enc,
				enable);
}

void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
{
	bool is_hdmi_tmds;
	bool is_dp;

	ASSERT(pipe_ctx->stream);

	if (pipe_ctx->stream_res.stream_enc == NULL)
		return;	/* this is not root pipe */

	is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
	is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);

	/* bail out only when the signal is neither HDMI TMDS nor DP,
	 * otherwise the DP branch below would be unreachable
	 */
	if (!is_hdmi_tmds && !is_dp)
		return;

	if (is_hdmi_tmds)
		pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
				pipe_ctx->stream_res.stream_enc,
				&pipe_ctx->stream_res.encoder_info_frame);
	else
		pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
				pipe_ctx->stream_res.stream_enc,
				&pipe_ctx->stream_res.encoder_info_frame);
}

void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	bool enable = false;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
	enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
			? dmdata_dp
			: dmdata_hdmi;

	/* if using dynamic meta, don't set up generic infopackets */
	if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
		pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
		enable = true;
	}

	if (!hubp)
		return;

	if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
		return;

	stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
			hubp->inst, mode);
}

bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	unsigned int surface_size;

	if (!dc->ctx->dmub_srv)
		return false;

	if (enable) {
		if (dc->current_state
				&& dc->current_state->stream_count == 1 // single display only
				&& dc->current_state->stream_status[0].plane_count == 1 // single surface only
				&& dc->current_state->stream_status[0].plane_states[0]->address.page_table_base.quad_part == 0 // no VM
				// Only 8 and 16 bit formats
				&& dc->current_state->stream_status[0].plane_states[0]->format <= SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F
				&& dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888) {

			/* surface footprint in bytes: pitch * height * bytes per pixel (8 for 16-bit formats, else 4) */
			surface_size = dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_pitch *
					dc->current_state->stream_status[0].plane_states[0]->plane_size.surface_size.height *
					(dc->current_state->stream_status[0].plane_states[0]->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);

		}

		return false;
	}

	return true;
}