1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include <linux/delay.h>
#include "dm_services.h"
#include "basics/dc_common.h"
#include "core_types.h"
#include "resource.h"
#include "custom_float.h"
#include "dcn10_hw_sequencer.h"
#include "dcn10_hw_sequencer_debug.h"
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
#include "dcn10_optc.h"
#include "dcn10_dpp.h"
#include "dcn10_mpc.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "reg_helper.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dsc.h"
#include "dce/dmub_psr.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"

#define DC_LOGGER_INIT(logger)

/* Register-access helpers: REG()/FN() resolve names through the local
 * 'hws' (struct dce_hwseq *) variable of the enclosing function.
 */
#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

/*print is 17 wide, first two characters are spaces*/
#define DTN_INFO_MICRO_SEC(ref_cycle) \
	print_microsec(dc_ctx, log_ctx, ref_cycle)

#define GAMMA_HW_POINTS_NUM 256

/* DOMAINx_PGFSM_PWR_STATUS values reported by the power-gate state machine */
#define PGFSM_POWER_ON 0
#define PGFSM_POWER_OFF 2

/* Convert a DCHUB refclk cycle count to microseconds and DTN-log it with
 * three fractional digits. Assumes dchub_ref_clock_inKhz is non-zero
 * (it is programmed before any of these debug dumps run).
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	static const unsigned int frac = 1000;
	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO(" %11d.%03d",
			us_x10 / frac,
			us_x10 % frac);
}

/* Lock or unlock the OTG of every active top pipe in 'context'.
 * Locking is done only on top pipes because child (bottom/ODM) pipes share
 * the same timing generator; locking it once covers the whole tree.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 * SubVP phantom pipes are skipped too: their OTG is managed
		 * by the SubVP sequence, not by this generic lock pass.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		if (lock)
			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
		else
			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
	}
}

/* Dump MPC and DPP CRC result registers, when they exist on this ASIC
 * (register presence is checked via the hwseq register map).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
			REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
			REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}

/* Dump the four HUBBUB watermark sets (urgent, self-refresh enter/exit,
 * DRAM clock change) in microseconds.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
		" sr_enter sr_exit dram_clk_change\n");

	/* hardware exposes 4 watermark sets (A-D) */
	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}

/* Dump per-HUBP state: surface config, then the RQ (request), DLG (delay)
 * and TTU (time-to-underflow) register groups. Pipes that are blanked
 * (s->blank_en) are skipped in every section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* refresh the cached register snapshot before printing */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}

/* Dump a full hardware state snapshot to the DTN log: HUBBUB watermarks,
 * HUBP state, DPP gamma/gamut state, MPCC tree, OTG timing, DSC, stream
 * and link encoders, calculated clocks, CRCs and HPO DP encoders.
 * Debug/diagnostics only; also clears the sticky OTG underflow bit for
 * each enabled OTG it prints (see comment at the clear site).
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
			" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
			"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		/* LUT mode fields are decoded to human-readable names;
		 * modes 2 and 3 (and 3/4 for DGAM/RGAM) are both RAM banks.
		 */
		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
				"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								"Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								"Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								"Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is not connected to any OPP */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
			dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* enc_read_state is optional on some encoder variants */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be NULL (e.g. dynamically-assigned encoders) */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		/* DP HPO (128b/132b) stream encoders, present on DCN3.1+ pools */
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}

/* Check whether an underflow occurred on either the OTG or the HUBP of
 * this pipe. If one did, its sticky status bit is cleared and true is
 * returned; otherwise false.
 */
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (tg->funcs->is_optc_underflow_occurred(tg)) {
		tg->funcs->clear_optc_underflow(tg);
		return true;
	}

	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
		hubp->funcs->hubp_clear_underflow(hubp);
		return true;
	}
	return false;
}

/* Enable or disable plane power gating for all HUBP/DPP domains.
 * Note the inversion: enable == true clears POWER_FORCEON so the PGFSM
 * may gate the domains; enable == false forces them always-on.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */

	if (enable)
		force_on = false;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}

/* Take the display controller out of legacy VGA mode. No-op when none of
 * the four VGA controllers is enabled.
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 * VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}

/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP domain up (un-gate it), false to
 *            power-gate it. Note: true maps to POWER_GATE = 0 and a wait
 *            for PGFSM_POWER_ON.
 *
 * Enable or disable power gate in the specific DPP instance.
 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	bool power_on)
{
	/* power_on == true un-gates the domain (POWER_GATE = 0) */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	/* no-op when the ASIC has no DPP power-gate registers */
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP instances map to odd PG domains (1/3/5/7); after programming
	 * the gate bit, poll the PGFSM status (up to 1000 * 1us).
	 */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}

/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP domain up (un-gate it), false to
 *            power-gate it. Note: true maps to POWER_GATE = 0 and a wait
 *            for PGFSM_POWER_ON.
 *
 * Enable or disable power gate in the specific HUBP instance.
 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	bool power_on)
{
	/* power_on == true un-gates the domain (POWER_GATE = 0) */
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	/* no-op when the ASIC has no HUBP power-gate registers */
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP instances map to even PG domains (0/2/4/6); after programming
	 * the gate bit, poll the PGFSM status (up to 1000 * 1us).
	 */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}

/* Power up (un-gate) the DPP and HUBP for one plane, bracketed by
 * DC_IP_REQUEST_CNTL as the PG sequence requires, and enable the DPP
 * root clock first when the hook exists.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}

/* Revert the DEGVIDCN10-253 workaround: blank HUBP0 again and power-gate
 * it. No-op if the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}

/* DEGVIDCN10-253 workaround: when every HUBP is power-gated, stutter
 * cannot engage; power HUBP0 back up (kept blanked) so stutter works.
 * Skipped when stutter is disabled via debug option or the WA flag is
 * not set for this ASIC.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* only applicable when ALL pipes are power gated */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}

/* Bring DCN into the VBIOS "golden" init state: run the global and
 * per-pipe power-gating init through the VBIOS command table, then
 * restore DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE if the command
 * table flipped it (S0i3 resume WA, see comment below).
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}

/* WA for spurious OTG underflow reported around MPCC disconnect: wait for
 * all MPCC disconnects on this stream, re-arm blank-data double buffering,
 * and clear the underflow bit only if it appeared during this sequence
 * (a pre-existing underflow is left sticky).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}

/* Return the largest vready_offset among this pipe and every pipe it is
 * grouped with (bottom/top MPC chain and next/prev ODM chain), so all
 * pipes sharing a timing group program the same, safest offset.
 */
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
{
	struct pipe_ctx *other_pipe;
	int vready_offset = pipe->pipe_dlg_param.vready_offset;

	/* Always use the largest vready_offset of all connected pipes */
	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}
	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
	}

	return vready_offset;
}

/* Enable the stream timing for a top pipe: turn on the OTG clock, program
 * the pixel clock PLL, program OTG timing and blank color, then enable
 * the CRTC. Returns DC_OK, or DC_ERROR_UNEXPECTED if the pixel clock or
 * CRTC enable fails. Child pipes return DC_OK immediately (they share the
 * parent's back end).
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* track symclk state for HDMI TMDS: OTG now holds a reference */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}

static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
1045 */ 1046 if (pipe_ctx->top_pipe == NULL) { 1047 1048 if (pipe_ctx->stream_res.abm) 1049 dc->hwss.set_abm_immediate_disable(pipe_ctx); 1050 1051 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); 1052 1053 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); 1054 if (pipe_ctx->stream_res.tg->funcs->set_drr) 1055 pipe_ctx->stream_res.tg->funcs->set_drr( 1056 pipe_ctx->stream_res.tg, NULL); 1057 if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) 1058 pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0; 1059 } 1060 1061 for (i = 0; i < dc->res_pool->pipe_count; i++) 1062 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) 1063 break; 1064 1065 if (i == dc->res_pool->pipe_count) 1066 return; 1067 1068 pipe_ctx->stream = NULL; 1069 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", 1070 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); 1071 } 1072 1073 static bool dcn10_hw_wa_force_recovery(struct dc *dc) 1074 { 1075 struct hubp *hubp ; 1076 unsigned int i; 1077 bool need_recover = true; 1078 1079 if (!dc->debug.recovery_enabled) 1080 return false; 1081 1082 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1083 struct pipe_ctx *pipe_ctx = 1084 &dc->current_state->res_ctx.pipe_ctx[i]; 1085 if (pipe_ctx != NULL) { 1086 hubp = pipe_ctx->plane_res.hubp; 1087 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) { 1088 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { 1089 /* one pipe underflow, we will reset all the pipes*/ 1090 need_recover = true; 1091 } 1092 } 1093 } 1094 } 1095 if (!need_recover) 1096 return false; 1097 /* 1098 DCHUBP_CNTL:HUBP_BLANK_EN=1 1099 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1 1100 DCHUBP_CNTL:HUBP_DISABLE=1 1101 DCHUBP_CNTL:HUBP_DISABLE=0 1102 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0 1103 DCSURF_PRIMARY_SURFACE_ADDRESS 1104 DCHUBP_CNTL:HUBP_BLANK_EN=0 1105 */ 1106 1107 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1108 struct pipe_ctx *pipe_ctx 
= 1109 &dc->current_state->res_ctx.pipe_ctx[i]; 1110 if (pipe_ctx != NULL) { 1111 hubp = pipe_ctx->plane_res.hubp; 1112 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ 1113 if (hubp != NULL && hubp->funcs->set_hubp_blank_en) 1114 hubp->funcs->set_hubp_blank_en(hubp, true); 1115 } 1116 } 1117 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/ 1118 hubbub1_soft_reset(dc->res_pool->hubbub, true); 1119 1120 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1121 struct pipe_ctx *pipe_ctx = 1122 &dc->current_state->res_ctx.pipe_ctx[i]; 1123 if (pipe_ctx != NULL) { 1124 hubp = pipe_ctx->plane_res.hubp; 1125 /*DCHUBP_CNTL:HUBP_DISABLE=1*/ 1126 if (hubp != NULL && hubp->funcs->hubp_disable_control) 1127 hubp->funcs->hubp_disable_control(hubp, true); 1128 } 1129 } 1130 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1131 struct pipe_ctx *pipe_ctx = 1132 &dc->current_state->res_ctx.pipe_ctx[i]; 1133 if (pipe_ctx != NULL) { 1134 hubp = pipe_ctx->plane_res.hubp; 1135 /*DCHUBP_CNTL:HUBP_DISABLE=0*/ 1136 if (hubp != NULL && hubp->funcs->hubp_disable_control) 1137 hubp->funcs->hubp_disable_control(hubp, true); 1138 } 1139 } 1140 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/ 1141 hubbub1_soft_reset(dc->res_pool->hubbub, false); 1142 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1143 struct pipe_ctx *pipe_ctx = 1144 &dc->current_state->res_ctx.pipe_ctx[i]; 1145 if (pipe_ctx != NULL) { 1146 hubp = pipe_ctx->plane_res.hubp; 1147 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ 1148 if (hubp != NULL && hubp->funcs->set_hubp_blank_en) 1149 hubp->funcs->set_hubp_blank_en(hubp, true); 1150 } 1151 } 1152 return true; 1153 1154 } 1155 1156 void dcn10_verify_allow_pstate_change_high(struct dc *dc) 1157 { 1158 struct hubbub *hubbub = dc->res_pool->hubbub; 1159 static bool should_log_hw_state; /* prevent hw state log by default */ 1160 1161 if (!hubbub->funcs->verify_allow_pstate_change_high) 1162 return; 1163 1164 if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { 1165 int i = 0; 1166 1167 if 
(should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}

/* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Disconnect requires a later optimize pass to finish the removal. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}

/**
 * dcn10_plane_atomic_power_down - Power down plane components.
 *
 * @dc: dc struct reference. used for grab hwseq.
 * @dpp: dpp struct reference.
 * @hubp: hubp struct reference.
 *
 * Keep in mind that this operation requires a power gate configuration;
 * however, requests for switch power gate are precisely controlled to avoid
 * problems. For this reason, power gate request is usually disabled. This
 * function first needs to enable the power gate request before disabling DPP
 * and HUBP. Finally, it disables the power gate request again.
 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Some ASICs do not expose DC_IP_REQUEST_CNTL; skip gating if so. */
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		dpp->funcs->dpp_reset(dpp);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}

/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Only gate the OPP clock when this pipe had a valid OPP and its MPC
	 * tree is now empty.
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Fully detach the pipe from the state it belonged to. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}

/* Disable the front end for a pipe unless it is already power gated. */
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	apply_DEGVIDCN10_253_wa(dc);

	DC_LOG_DC("Power down front end %d\n",
					pipe_ctx->pipe_idx);
}

/* Bring all pipes to a known disabled state at init, except pipes that must
 * be preserved for seamless boot.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Populate the pipe with its default (index-matched) resources
		 * so the disconnect/disable helpers below can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			// Skip a DSC that is actively feeding the running OPTC.
			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}

/* One-time hardware init: clocks, BIOS golden init, reference clocks, link
 * encoders, pipe reset, audio, ABM/DMCU and clock gating.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			/* Use the panel controller's restored backlight level
			 * for ABM init below.
			 */
			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}

/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Prefer the full eDP power-down path (backlight off, power down,
	 * panel power off) when all required hooks exist.
	 */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* Otherwise power down once if any DIG is found enabled. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}

/* Reset back ends for pipes whose stream is going away or needs reprogram.
 * Iterates in reverse so parent pipes (pipe0) are reset last.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}

/* For side-by-side / top-and-bottom stereo, swap the secondary split pipe to
 * the right-eye address (saving the left address into *addr for restore),
 * or mirror left into right for non-stereo-typed addresses.
 * Returns true when the address was patched and must be restored later.
 */
static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
		(pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		 pipe_ctx->stream->timing.timing_3d_format ==
		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
		plane_state->address.grph_stereo.right_addr;
		return true;
	} else {
		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
			plane_state->address.grph_stereo.right_addr =
			plane_state->address.grph_stereo.left_addr;
			plane_state->address.grph_stereo.right_meta_addr =
			plane_state->address.grph_stereo.left_meta_addr;
		}
	}
	return false;
}

/* Program the HUBP surface address (with flip) for the pipe's plane. */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left address swapped away by the stereo patch above. */
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}

/* Program the DPP input LUT and degamma for a plane's transfer function.
 * Returns false when there is no DPP or the predefined TF is unsupported.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;

	if (dpp_base == NULL)
		return false;

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
		&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	if (tf == NULL)
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			/* PQ has no HW curve; translate to a user PWL. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		cm_helper_translate_curve_to_degamma_hw_format(tf,
1793 &dpp_base->degamma_params); 1794 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, 1795 &dpp_base->degamma_params); 1796 result = true; 1797 } 1798 1799 return result; 1800 } 1801 1802 #define MAX_NUM_HW_POINTS 0x200 1803 1804 static void log_tf(struct dc_context *ctx, 1805 struct dc_transfer_func *tf, uint32_t hw_points_num) 1806 { 1807 // DC_LOG_GAMMA is default logging of all hw points 1808 // DC_LOG_ALL_GAMMA logs all points, not only hw points 1809 // DC_LOG_ALL_TF_POINTS logs all channels of the tf 1810 int i = 0; 1811 1812 DC_LOGGER_INIT(ctx->logger); 1813 DC_LOG_GAMMA("Gamma Correction TF"); 1814 DC_LOG_ALL_GAMMA("Logging all tf points..."); 1815 DC_LOG_ALL_TF_CHANNELS("Logging all channels..."); 1816 1817 for (i = 0; i < hw_points_num; i++) { 1818 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); 1819 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); 1820 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); 1821 } 1822 1823 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) { 1824 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); 1825 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); 1826 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); 1827 } 1828 } 1829 1830 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, 1831 const struct dc_stream_state *stream) 1832 { 1833 struct dpp *dpp = pipe_ctx->plane_res.dpp; 1834 1835 if (!stream) 1836 return false; 1837 1838 if (dpp == NULL) 1839 return false; 1840 1841 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; 1842 1843 if (stream->out_transfer_func && 1844 stream->out_transfer_func->type == TF_TYPE_PREDEFINED && 1845 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) 1846 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB); 1847 1848 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full 1849 * update. 
1850 */ 1851 else if (cm_helper_translate_curve_to_hw_format(dc->ctx, 1852 stream->out_transfer_func, 1853 &dpp->regamma_params, false)) { 1854 dpp->funcs->dpp_program_regamma_pwl( 1855 dpp, 1856 &dpp->regamma_params, OPP_REGAMMA_USER); 1857 } else 1858 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); 1859 1860 if (stream->ctx && 1861 stream->out_transfer_func) { 1862 log_tf(stream->ctx, 1863 stream->out_transfer_func, 1864 dpp->regamma_params.hw_points_num); 1865 } 1866 1867 return true; 1868 } 1869 1870 void dcn10_pipe_control_lock( 1871 struct dc *dc, 1872 struct pipe_ctx *pipe, 1873 bool lock) 1874 { 1875 struct dce_hwseq *hws = dc->hwseq; 1876 1877 /* use TG master update lock to lock everything on the TG 1878 * therefore only top pipe need to lock 1879 */ 1880 if (!pipe || pipe->top_pipe) 1881 return; 1882 1883 if (dc->debug.sanity_checks) 1884 hws->funcs.verify_allow_pstate_change_high(dc); 1885 1886 if (lock) 1887 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); 1888 else 1889 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); 1890 1891 if (dc->debug.sanity_checks) 1892 hws->funcs.verify_allow_pstate_change_high(dc); 1893 } 1894 1895 /** 1896 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE. 1897 * 1898 * Software keepout workaround to prevent cursor update locking from stalling 1899 * out cursor updates indefinitely or from old values from being retained in 1900 * the case where the viewport changes in the same frame as the cursor. 1901 * 1902 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's 1903 * too close to VUPDATE, then stall out until VUPDATE finishes. 1904 * 1905 * TODO: Optimize cursor programming to be once per frame before VUPDATE 1906 * to avoid the need for this workaround. 
 *
 * @dc: Current DC state
 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
 *
 * Return: void
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are needed to locate VUPDATE relative to VPOS. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds. */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes.
	 */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}

/*
 * dcn10_cursor_lock() - Lock/unlock cursor registers for a stream's
 * MPCC tree, via DMUB when the link supports it, else via the MPC.
 * Before locking, stall if too close to VUPDATE (see
 * delay_cursor_until_vupdate()).
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
	if (!pipe || pipe->top_pipe)
		return;

	/* Prevent cursor lock from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
				lock,
				&hw_locks,
				&inst_flags);
	} else
		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
				pipe->stream_res.opp->inst, lock);
}

/*
 * wait_for_reset_trigger_to_occur() - Poll the timing generator until a
 * triggered reset is observed, waiting at most 10 frames.
 *
 * Return: true if the reset occurred, false on timeout or if the TG
 * counter is not moving.
 */
static bool wait_for_reset_trigger_to_occur(
	struct dc_context *dc_ctx,
	struct timing_generator *tg)
{
	bool rc = false;

	/* To avoid endless loop we wait at most
	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
	const uint32_t frames_to_wait_on_triggered_reset = 10;
	int i;

	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {

		if (!tg->funcs->is_counter_moving(tg)) {
			DC_ERROR("TG counter is not moving!\n");
			break;
		}

		if (tg->funcs->did_triggered_reset_occur(tg)) {
			rc = true;
			/* usually occurs at i=1 */
			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
					i);
			break;
		}

		/* Wait for one frame.
*/ 2018 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE); 2019 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK); 2020 } 2021 2022 if (false == rc) 2023 DC_ERROR("GSL: Timeout on reset trigger!\n"); 2024 2025 return rc; 2026 } 2027 2028 static uint64_t reduceSizeAndFraction(uint64_t *numerator, 2029 uint64_t *denominator, 2030 bool checkUint32Bounary) 2031 { 2032 int i; 2033 bool ret = checkUint32Bounary == false; 2034 uint64_t max_int32 = 0xffffffff; 2035 uint64_t num, denom; 2036 static const uint16_t prime_numbers[] = { 2037 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 2038 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 2039 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 2040 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 2041 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 2042 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 2043 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 2044 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 2045 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 2046 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 2047 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 2048 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 2049 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 2050 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 2051 941, 947, 953, 967, 971, 977, 983, 991, 997}; 2052 int count = ARRAY_SIZE(prime_numbers); 2053 2054 num = *numerator; 2055 denom = *denominator; 2056 for (i = 0; i < count; i++) { 2057 uint32_t num_remainder, denom_remainder; 2058 uint64_t num_result, denom_result; 2059 if (checkUint32Bounary && 2060 num <= max_int32 && denom <= max_int32) { 2061 ret = true; 2062 break; 2063 } 2064 do { 2065 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder); 2066 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder); 2067 if (num_remainder == 0 && denom_remainder == 0) { 2068 num = num_result; 2069 denom = 
denom_result; 2070 } 2071 } while (num_remainder == 0 && denom_remainder == 0); 2072 } 2073 *numerator = num; 2074 *denominator = denom; 2075 return ret; 2076 } 2077 2078 static bool is_low_refresh_rate(struct pipe_ctx *pipe) 2079 { 2080 uint32_t master_pipe_refresh_rate = 2081 pipe->stream->timing.pix_clk_100hz * 100 / 2082 pipe->stream->timing.h_total / 2083 pipe->stream->timing.v_total; 2084 return master_pipe_refresh_rate <= 30; 2085 } 2086 2087 static uint8_t get_clock_divider(struct pipe_ctx *pipe, 2088 bool account_low_refresh_rate) 2089 { 2090 uint32_t clock_divider = 1; 2091 uint32_t numpipes = 1; 2092 2093 if (account_low_refresh_rate && is_low_refresh_rate(pipe)) 2094 clock_divider *= 2; 2095 2096 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) 2097 clock_divider *= 2; 2098 2099 while (pipe->next_odm_pipe) { 2100 pipe = pipe->next_odm_pipe; 2101 numpipes++; 2102 } 2103 clock_divider *= numpipes; 2104 2105 return clock_divider; 2106 } 2107 2108 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, 2109 struct pipe_ctx *grouped_pipes[]) 2110 { 2111 struct dc_context *dc_ctx = dc->ctx; 2112 int i, master = -1, embedded = -1; 2113 struct dc_crtc_timing *hw_crtc_timing; 2114 uint64_t phase[MAX_PIPES]; 2115 uint64_t modulo[MAX_PIPES]; 2116 unsigned int pclk; 2117 2118 uint32_t embedded_pix_clk_100hz; 2119 uint16_t embedded_h_total; 2120 uint16_t embedded_v_total; 2121 uint32_t dp_ref_clk_100hz = 2122 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10; 2123 2124 hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL); 2125 if (!hw_crtc_timing) 2126 return master; 2127 2128 if (dc->config.vblank_alignment_dto_params && 2129 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) { 2130 embedded_h_total = 2131 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF; 2132 embedded_v_total = 2133 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF; 2134 embedded_pix_clk_100hz = 2135 
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is the reference (master). */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				/* Scale the other pipes' DTOs against the
				 * embedded timing so frame durations match.
				 */
				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* No embedded pipe found: first overridden pipe
				 * becomes the master.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}

/*
 * dcn10_enable_vblanks_synchronization() - Align DP DTOs of all pipes in
 * the group and then align their vblanks to the master pipe's TG.
 *
 * DPG dimensions are temporarily stretched (2*height + 1) while the
 * alignment runs, and restored afterwards.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			/* NOTE(review): only the align_vblanks call is guarded
			 * by the if above; this flag is set for every pipe in
			 * the group (including master) -- confirm intended.
			 */
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}

/*
 * dcn10_enable_timing_synchronization() - Reset the OTGs of all pipes in
 * the group against pipe 0's OTG so their timings start in lockstep.
 * SubVP phantom pipes are excluded from the procedure.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions that were stretched during setup. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}

/*
 * dcn10_enable_per_frame_crtc_position_reset() - Arm per-frame CRTC
 * position reset on every pipe in the group and wait for each reset
 * trigger to fire.
 */
void dcn10_enable_per_frame_crtc_position_reset(
	struct dc *dc,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i;

	DC_SYNC_INFO("Setting up\n");
	for (i = 0; i < group_size; i++)
		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
					grouped_pipes[i]->stream_res.tg,
					0,
					&grouped_pipes[i]->stream->triggered_crtc_reset);

	DC_SYNC_INFO("Waiting for trigger\n");

	for (i = 0; i < group_size; i++)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);

	DC_SYNC_INFO("Multi-display sync is complete\n");
}

/*
 * mmhub_read_vm_system_aperture_settings() - Read the MC VM system
 * aperture (default page, low/high logical bounds) from registers into
 * @apt, applying the hardware's fixed shifts.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}

/* Temporary read settings, future will get values from kmd directly */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}


/*
 * dcn10_program_pte_vm() - Mirror the MMHUB system aperture and VM
 * context0 settings into the HUBP so it can translate addresses.
 */
static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
	struct vm_system_aperture_param apt = {0};
	struct vm_context0_param vm0 = {0};

	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);

	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
}

/*
 * dcn10_enable_plane() - Power up and clock the resources backing a
 * plane (HUBP, OPP clock), program PTE VM state when GPU VM is in use,
 * and arm the flip interrupt for the top pipe.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&&
		pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}

/*
 * dcn10_program_gamut_remap() - Program the DPP gamut remap matrix.
 * Stream-level remap takes precedence over plane-level remap; bypass
 * when neither is enabled.
 */
void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	int i = 0;
	struct dpp_grph_csc_adjustment adjust;
	memset(&adjust, 0, sizeof(adjust));
	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;


	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
	} else if (pipe_ctx->plane_state &&
		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			adjust.temperature_matrix[i] =
				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}


/*
 * dcn10_is_rear_mpo_fix_required() - True when this pipe is a rear MPO
 * plane (layer_index > 0) in an RGB colorspace whose front plane is
 * visible, i.e. the OCSC bias double-add workaround applies.
 */
static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
{
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
		if (pipe_ctx->top_pipe) {
			struct pipe_ctx *top = pipe_ctx->top_pipe;

			while (top->top_pipe)
				top = top->top_pipe; // Traverse to top pipe_ctx
			if (top->plane_state && top->plane_state->layer_index == 0)
				return true; // Front MPO plane not hidden
		}
	}
	return false;
}

/*
 * dcn10_set_csc_adjustment_rgb_mpo_fix() - Program the OCSC with RGB
 * bias zeroed, then restore the caller's matrix entries (the matrix is
 * shared state owned by the caller).
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	// Override rear plane RGB bias to fix MPO brightness
	uint16_t rgb_bias = matrix[3];

	matrix[3] = 0;
	matrix[7] = 0;
	matrix[11] = 0;
	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
	matrix[3] = rgb_bias;
	matrix[7] = rgb_bias;
	matrix[11] = rgb_bias;
}

/*
 * dcn10_program_output_csc() - Program the output color space
 * conversion: custom matrix when stream adjustment is enabled (with the
 * rear-MPO bias workaround when required), otherwise the default matrix
 * for @colorspace.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {

			/* MPO is broken with RGB colorspaces when OCSC matrix
			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
			 * Blending adds offsets from front + rear to rear plane
			 *
			 * Fix is to set RGB bias to 0 on rear plane, top plane
			 * black value pixels add offset instead of rear + front
			 */

			int16_t rgb_bias = matrix[3];
			// matrix[3/7/11] are all the same offset value

			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
			} else {
				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
			}
		}
	} else {
		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
	}
}

/*
 * dcn10_update_dpp() - Program the DPP input CSC and the bias/scale
 * registers for @plane_state.
 */
static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
{
	struct dc_bias_and_scale bns_params = {0};

	// program the input csc
	dpp->funcs->dpp_setup(dpp,
			plane_state->format,
			EXPANSION_MODE_ZERO,
			plane_state->input_csc_color_matrix,
			plane_state->color_space,
			NULL);

	//set scale and bias registers
	build_prescale_params(&bns_params, plane_state);
	if (dpp->funcs->dpp_program_bias_and_scale)
		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
}

/*
 * dcn10_update_visual_confirm_color() - Push the pipe's visual confirm
 * color into the plane state and program it as the MPCC background.
 */
void dcn10_update_visual_confirm_color(struct dc
 *dc,
		struct pipe_ctx *pipe_ctx,
		int mpcc_id)
{
	struct mpc *mpc = dc->res_pool->mpc;

	if (mpc->funcs->set_bg_color) {
		memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
		mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
	}
}

/*
 * dcn10_update_mpcc() - Build the MPCC blending config for the pipe's
 * plane (alpha mode, global gain/alpha) and, on a full update, rebuild
 * the MPC tree connection; otherwise just refresh the blending state.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}

/*
 * update_scaler() - Program the DPP scaler from the pipe's precomputed
 * scl_data; alpha is enabled only for blended (non-bottom) per-pixel
 * alpha planes.
 */
static void update_scaler(struct pipe_ctx *pipe_ctx)
{
	bool per_pixel_alpha =
			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;

	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
	/* scaler configuration */
	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}

/*
 * dcn10_update_dchubp_dpp() - Reprogram HUBP and DPP state for a plane
 * update (clocks, DLG/TTU/RQ registers, CSC, MPCC, scaler) driven by
 * the plane's update_flags.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct plane_size size = plane_state->plane_size; 2701 unsigned int compat_level = 0; 2702 bool should_divided_by_2 = false; 2703 2704 /* depends on DML calculation, DPP clock value may change dynamically */ 2705 /* If request max dpp clk is lower than current dispclk, no need to 2706 * divided by 2 2707 */ 2708 if (plane_state->update_flags.bits.full_update) { 2709 2710 /* new calculated dispclk, dppclk are stored in 2711 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current 2712 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz. 2713 * dcn10_validate_bandwidth compute new dispclk, dppclk. 2714 * dispclk will put in use after optimize_bandwidth when 2715 * ramp_up_dispclk_with_dpp is called. 2716 * there are two places for dppclk be put in use. One location 2717 * is the same as the location as dispclk. Another is within 2718 * update_dchubp_dpp which happens between pre_bandwidth and 2719 * optimize_bandwidth. 2720 * dppclk updated within update_dchubp_dpp will cause new 2721 * clock values of dispclk and dppclk not be in use at the same 2722 * time. when clocks are decreased, this may cause dppclk is 2723 * lower than previous configuration and let pipe stuck. 2724 * for example, eDP + external dp, change resolution of DP from 2725 * 1920x1080x144hz to 1280x960x60hz. 2726 * before change: dispclk = 337889 dppclk = 337889 2727 * change mode, dcn10_validate_bandwidth calculate 2728 * dispclk = 143122 dppclk = 143122 2729 * update_dchubp_dpp be executed before dispclk be updated, 2730 * dispclk = 337889, but dppclk use new value dispclk /2 = 2731 * 168944. this will cause pipe pstate warning issue. 
2732 * solution: between pre_bandwidth and optimize_bandwidth, while 2733 * dispclk is going to be decreased, keep dppclk = dispclk 2734 **/ 2735 if (context->bw_ctx.bw.dcn.clk.dispclk_khz < 2736 dc->clk_mgr->clks.dispclk_khz) 2737 should_divided_by_2 = false; 2738 else 2739 should_divided_by_2 = 2740 context->bw_ctx.bw.dcn.clk.dppclk_khz <= 2741 dc->clk_mgr->clks.dispclk_khz / 2; 2742 2743 dpp->funcs->dpp_dppclk_control( 2744 dpp, 2745 should_divided_by_2, 2746 true); 2747 2748 if (dc->res_pool->dccg) 2749 dc->res_pool->dccg->funcs->update_dpp_dto( 2750 dc->res_pool->dccg, 2751 dpp->inst, 2752 pipe_ctx->plane_res.bw.dppclk_khz); 2753 else 2754 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? 2755 dc->clk_mgr->clks.dispclk_khz / 2 : 2756 dc->clk_mgr->clks.dispclk_khz; 2757 } 2758 2759 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 2760 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP. 2761 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG 2762 */ 2763 if (plane_state->update_flags.bits.full_update) { 2764 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); 2765 2766 hubp->funcs->hubp_setup( 2767 hubp, 2768 &pipe_ctx->dlg_regs, 2769 &pipe_ctx->ttu_regs, 2770 &pipe_ctx->rq_regs, 2771 &pipe_ctx->pipe_dlg_param); 2772 hubp->funcs->hubp_setup_interdependent( 2773 hubp, 2774 &pipe_ctx->dlg_regs, 2775 &pipe_ctx->ttu_regs); 2776 } 2777 2778 size.surface_size = pipe_ctx->plane_res.scl_data.viewport; 2779 2780 if (plane_state->update_flags.bits.full_update || 2781 plane_state->update_flags.bits.bpp_change) 2782 dcn10_update_dpp(dpp, plane_state); 2783 2784 if (plane_state->update_flags.bits.full_update || 2785 plane_state->update_flags.bits.per_pixel_alpha_change || 2786 plane_state->update_flags.bits.global_alpha_change) 2787 hws->funcs.update_mpcc(dc, pipe_ctx); 2788 2789 if (plane_state->update_flags.bits.full_update || 2790 plane_state->update_flags.bits.per_pixel_alpha_change || 2791 
plane_state->update_flags.bits.global_alpha_change || 2792 plane_state->update_flags.bits.scaling_change || 2793 plane_state->update_flags.bits.position_change) { 2794 update_scaler(pipe_ctx); 2795 } 2796 2797 if (plane_state->update_flags.bits.full_update || 2798 plane_state->update_flags.bits.scaling_change || 2799 plane_state->update_flags.bits.position_change) { 2800 hubp->funcs->mem_program_viewport( 2801 hubp, 2802 &pipe_ctx->plane_res.scl_data.viewport, 2803 &pipe_ctx->plane_res.scl_data.viewport_c); 2804 } 2805 2806 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { 2807 dc->hwss.set_cursor_position(pipe_ctx); 2808 dc->hwss.set_cursor_attribute(pipe_ctx); 2809 2810 if (dc->hwss.set_cursor_sdr_white_level) 2811 dc->hwss.set_cursor_sdr_white_level(pipe_ctx); 2812 } 2813 2814 if (plane_state->update_flags.bits.full_update) { 2815 /*gamut remap*/ 2816 dc->hwss.program_gamut_remap(pipe_ctx); 2817 2818 dc->hwss.program_output_csc(dc, 2819 pipe_ctx, 2820 pipe_ctx->stream->output_color_space, 2821 pipe_ctx->stream->csc_color_matrix.matrix, 2822 pipe_ctx->stream_res.opp->inst); 2823 } 2824 2825 if (plane_state->update_flags.bits.full_update || 2826 plane_state->update_flags.bits.pixel_format_change || 2827 plane_state->update_flags.bits.horizontal_mirror_change || 2828 plane_state->update_flags.bits.rotation_change || 2829 plane_state->update_flags.bits.swizzle_change || 2830 plane_state->update_flags.bits.dcc_change || 2831 plane_state->update_flags.bits.bpp_change || 2832 plane_state->update_flags.bits.scaling_change || 2833 plane_state->update_flags.bits.plane_size_change) { 2834 hubp->funcs->hubp_program_surface_config( 2835 hubp, 2836 plane_state->format, 2837 &plane_state->tiling_info, 2838 &size, 2839 plane_state->rotation, 2840 &plane_state->dcc, 2841 plane_state->horizontal_mirror, 2842 compat_level); 2843 } 2844 2845 hubp->power_gated = false; 2846 2847 hws->funcs.update_plane_addr(dc, pipe_ctx); 2848 2849 if (is_pipe_tree_visible(pipe_ctx)) 
2850 hubp->funcs->set_blank(hubp, false); 2851 } 2852 2853 void dcn10_blank_pixel_data( 2854 struct dc *dc, 2855 struct pipe_ctx *pipe_ctx, 2856 bool blank) 2857 { 2858 enum dc_color_space color_space; 2859 struct tg_color black_color = {0}; 2860 struct stream_resource *stream_res = &pipe_ctx->stream_res; 2861 struct dc_stream_state *stream = pipe_ctx->stream; 2862 2863 /* program otg blank color */ 2864 color_space = stream->output_color_space; 2865 color_space_to_black_color(dc, color_space, &black_color); 2866 2867 /* 2868 * The way 420 is packed, 2 channels carry Y component, 1 channel 2869 * alternate between Cb and Cr, so both channels need the pixel 2870 * value for Y 2871 */ 2872 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) 2873 black_color.color_r_cr = black_color.color_g_y; 2874 2875 2876 if (stream_res->tg->funcs->set_blank_color) 2877 stream_res->tg->funcs->set_blank_color( 2878 stream_res->tg, 2879 &black_color); 2880 2881 if (!blank) { 2882 if (stream_res->tg->funcs->set_blank) 2883 stream_res->tg->funcs->set_blank(stream_res->tg, blank); 2884 if (stream_res->abm) { 2885 dc->hwss.set_pipe(pipe_ctx); 2886 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); 2887 } 2888 } else { 2889 dc->hwss.set_abm_immediate_disable(pipe_ctx); 2890 if (stream_res->tg->funcs->set_blank) { 2891 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK); 2892 stream_res->tg->funcs->set_blank(stream_res->tg, blank); 2893 } 2894 } 2895 } 2896 2897 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx) 2898 { 2899 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; 2900 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier 2901 struct custom_float_format fmt; 2902 2903 fmt.exponenta_bits = 6; 2904 fmt.mantissa_bits = 12; 2905 fmt.sign = true; 2906 2907 2908 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 2909 convert_to_custom_float_format(multiplier, &fmt, &hw_mult); 2910 2911 
	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
			pipe_ctx->plane_res.dpp, hw_mult);
}

/* Full programming sequence for one pipe: global sync / VTG on the top
 * pipe, plane enable on full update, HUBP/DPP programming, HDR
 * multiplier, and input/output transfer functions.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* only the top pipe owns the timing generator programming */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}

/* Wait until all enabled top pipes have passed VBLANK and re-entered
 * VACTIVE, guaranteeing a VUPDATE occurred for each.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait for top pipe's tg penindg bit
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}

/* Post-unlock front-end work: apply the OPTC underflow workaround on
 * plane-less top pipes, disable planes flagged for removal, and run a
 * single optimize_bandwidth pass if anything was disabled.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* one optimize pass is enough if any pipe was disabled */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}

/* Workaround: disable stutter (self-refresh) whenever any stream uses
 * HW frame-packed stereo timing.
 */
static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
{
	uint8_t i;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->timing.timing_3d_format
			== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
			/*
			 * Disable stutter
			 */
			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
			break;
		}
	}
}

/* Raise clocks and program safe watermarks before applying a new state
 * (update_clocks with safe_to_lower == false).
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}

/* Lower clocks to the new state's requirements after it is applied
 * (update_clocks with safe_to_lower == true).
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}

void
dcn10_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
	unsigned int event_triggers = 0x800;
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
	/* TODO: If multiple pipes are to be supported, you need
	 * some GSL stuff. Static screen triggers may be programmed differently
	 * as well.
	 */
	for (i = 0; i < num_pipes; i++) {
		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
					pipe_ctx[i]->stream_res.tg, &params);
			/* static-screen trigger only makes sense when DRR is active */
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
						pipe_ctx[i]->stream_res.tg,
						event_triggers, num_frames);
		}
	}
}

/* Read the current CRTC scan position from each pipe's TG. */
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
		int num_pipes,
		struct crtc_position *position)
{
	int i = 0;

	/* TODO: handle pipes > 1
	 */
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
}

/* Convert the generic static-screen trigger params into the TG's
 * trigger bitmask and program every pipe.
 */
void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	unsigned int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x80;
	if (params->triggers.cursor_update)
		triggers |= 0x2;
	if (params->triggers.force_trigger)
		triggers |= 0x1;

	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}

/* Derive CRTC stereo flags from the stream's 3D timing/view format;
 * frame-sequential output gets stereo programming, with DP sync
 * disabled for passive DP converter dongles.
 */
static void dcn10_config_stereo_parameters(
		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
{
	enum view_3d_format view_format = stream->view_format;
	enum dc_timing_3d_format timing_3d_format =\
			stream->timing.timing_3d_format;
	bool non_stereo_timing = false;

	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
		non_stereo_timing = true;

	if (non_stereo_timing == false &&
		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {

		flags->PROGRAM_STEREO = 1;
		flags->PROGRAM_POLARITY = 1;
		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {

			if (stream->link && stream->link->ddc) {
				enum display_dongle_type dongle = \
						stream->link->ddc->dongle_type;

				if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
					dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
					dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
					flags->DISABLE_STEREO_DP_SYNC = 1;
			}
		}
		flags->RIGHT_EYE_POLARITY =\
				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
			flags->FRAME_PACKED = 1;
	}

	return;
}

/* Program OPP and TG stereo output for a pipe, driving the stereo GPIO
 * for sideband frame-alternate formats.
 */
void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
{
	struct crtc_stereo_flags flags = { 0 };
	struct dc_stream_state *stream = pipe_ctx->stream;

	dcn10_config_stereo_parameters(stream, &flags);

if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { 3236 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service)) 3237 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service); 3238 } else { 3239 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service); 3240 } 3241 3242 pipe_ctx->stream_res.opp->funcs->opp_program_stereo( 3243 pipe_ctx->stream_res.opp, 3244 flags.PROGRAM_STEREO == 1, 3245 &stream->timing); 3246 3247 pipe_ctx->stream_res.tg->funcs->program_stereo( 3248 pipe_ctx->stream_res.tg, 3249 &stream->timing, 3250 &flags); 3251 3252 return; 3253 } 3254 3255 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst) 3256 { 3257 int i; 3258 3259 for (i = 0; i < res_pool->pipe_count; i++) { 3260 if (res_pool->hubps[i]->inst == mpcc_inst) 3261 return res_pool->hubps[i]; 3262 } 3263 ASSERT(false); 3264 return NULL; 3265 } 3266 3267 void dcn10_wait_for_mpcc_disconnect( 3268 struct dc *dc, 3269 struct resource_pool *res_pool, 3270 struct pipe_ctx *pipe_ctx) 3271 { 3272 struct dce_hwseq *hws = dc->hwseq; 3273 int mpcc_inst; 3274 3275 if (dc->debug.sanity_checks) { 3276 hws->funcs.verify_allow_pstate_change_high(dc); 3277 } 3278 3279 if (!pipe_ctx->stream_res.opp) 3280 return; 3281 3282 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { 3283 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { 3284 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); 3285 3286 if (pipe_ctx->stream_res.tg && 3287 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) 3288 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst); 3289 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; 3290 hubp->funcs->set_blank(hubp, true); 3291 } 3292 } 3293 3294 if (dc->debug.sanity_checks) { 3295 hws->funcs.verify_allow_pstate_change_high(dc); 3296 } 3297 3298 } 3299 3300 bool dcn10_dummy_display_power_gating( 3301 struct dc *dc, 3302 uint8_t controller_id, 3303 struct 
 dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}

/* Refresh a plane's flip/address status from the HUBP flip-pending bit
 * and track stereo eye; also lifts the multi-plane-transition
 * self-refresh workaround once a new frame has started.
 */
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	bool flip_pending;
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (plane_state == NULL)
		return;

	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
					pipe_ctx->plane_res.hubp);

	/* sticky until observed not-pending below */
	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;

	if (!flip_pending)
		plane_state->status.current_address = plane_state->status.requested_address;

	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			tg->funcs->is_stereo_left_eye) {
		plane_state->status.is_right_eye =
				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
	}

	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
		struct dce_hwseq *hwseq = dc->hwseq;
		/* shadows outer tg on purpose: the WA tracks TG 0's frame count */
		struct timing_generator *tg = dc->res_pool->timing_generators[0];
		unsigned int cur_frame = tg->funcs->get_frame_count(tg);

		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
		}
	}
}

/* Forward DCHUB init data to the HUBBUB, which owns this programming
 * sequence on DCN.
 */
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;

	/* In DCN, this programming sequence is owned by the hubbub */
	hubbub->funcs->update_dchub(hubbub, dh_data);
}

/* Return true when a visible plane above this pipe fully covers its
 * recout, so the HW cursor on this pipe can be disabled.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct
pipe_ctx *test_pipe, *split_pipe; 3358 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; 3359 struct rect r1 = scl_data->recout, r2, r2_half; 3360 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; 3361 int cur_layer = pipe_ctx->plane_state->layer_index; 3362 3363 /** 3364 * Disable the cursor if there's another pipe above this with a 3365 * plane that contains this pipe's viewport to prevent double cursor 3366 * and incorrect scaling artifacts. 3367 */ 3368 for (test_pipe = pipe_ctx->top_pipe; test_pipe; 3369 test_pipe = test_pipe->top_pipe) { 3370 // Skip invisible layer and pipe-split plane on same layer 3371 if (!test_pipe->plane_state || 3372 !test_pipe->plane_state->visible || 3373 test_pipe->plane_state->layer_index == cur_layer) 3374 continue; 3375 3376 r2 = test_pipe->plane_res.scl_data.recout; 3377 r2_r = r2.x + r2.width; 3378 r2_b = r2.y + r2.height; 3379 split_pipe = test_pipe; 3380 3381 /** 3382 * There is another half plane on same layer because of 3383 * pipe-split, merge together per same height. 3384 */ 3385 for (split_pipe = pipe_ctx->top_pipe; split_pipe; 3386 split_pipe = split_pipe->top_pipe) 3387 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { 3388 r2_half = split_pipe->plane_res.scl_data.recout; 3389 r2.x = (r2_half.x < r2.x) ? 
r2_half.x : r2.x; 3390 r2.width = r2.width + r2_half.width; 3391 r2_r = r2.x + r2.width; 3392 break; 3393 } 3394 3395 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) 3396 return true; 3397 } 3398 3399 return false; 3400 } 3401 3402 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) 3403 { 3404 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; 3405 struct hubp *hubp = pipe_ctx->plane_res.hubp; 3406 struct dpp *dpp = pipe_ctx->plane_res.dpp; 3407 struct dc_cursor_mi_param param = { 3408 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10, 3409 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz, 3410 .viewport = pipe_ctx->plane_res.scl_data.viewport, 3411 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, 3412 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, 3413 .rotation = pipe_ctx->plane_state->rotation, 3414 .mirror = pipe_ctx->plane_state->horizontal_mirror, 3415 .stream = pipe_ctx->stream, 3416 }; 3417 bool pipe_split_on = false; 3418 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || 3419 (pipe_ctx->prev_odm_pipe != NULL); 3420 3421 int x_plane = pipe_ctx->plane_state->dst_rect.x; 3422 int y_plane = pipe_ctx->plane_state->dst_rect.y; 3423 int x_pos = pos_cpy.x; 3424 int y_pos = pos_cpy.y; 3425 3426 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) { 3427 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) || 3428 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) { 3429 pipe_split_on = true; 3430 } 3431 } 3432 3433 /** 3434 * DC cursor is stream space, HW cursor is plane space and drawn 3435 * as part of the framebuffer. 3436 * 3437 * Cursor position can't be negative, but hotspot can be used to 3438 * shift cursor out of the plane bounds. Hotspot must be smaller 3439 * than the cursor size. 
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		/* 90/270: src width/height swap relative to dst */
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	/* video-progressive surfaces don't use the HW cursor here */
	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;


	if (param.rotation == ROTATION_ANGLE_0) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = temp_x + viewport_width;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}
	}
	// Swap axis and mirror horizontally
	else if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (!param.mirror) {
			if (pipe_split_on || odm_combine_on) {
				if (pos_cpy.x >= viewport_width + viewport_x) {
					pos_cpy.x = 2 * viewport_width
							- pos_cpy.x + 2 * viewport_x;
				} else {
					uint32_t temp_x = pos_cpy.x;

					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
					if (temp_x >= viewport_x +
						(int)hubp->curs_attr.width || pos_cpy.x
						<= (int)hubp->curs_attr.width +
						pipe_ctx->plane_state->src_rect.x) {
						pos_cpy.x = 2 * viewport_width - temp_x;
					}
				}
			} else {
				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
			}
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}

/* Push the stream's cursor attributes into this pipe's HUBP and DPP. */
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct
 dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.dpp, attributes);
}

/*
 * Program the DPP cursor scale multiplier from the stream's SDR white level.
 * For sdr_white_level > 80 the cursor is scaled by sdr_white_level / 80,
 * encoded as a custom float (sign, 5 exponent bits, 10 mantissa bits);
 * otherwise the default multiplier of 1.0 (0x3c00 in that format) is used.
 * No-op if the DPP does not implement set_optional_cursor_attributes.
 */
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
	struct custom_float_format fmt;

	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
		return;

	/* Half-float-like layout: 1 sign, 5 exponent, 10 mantissa bits. */
	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;
	fmt.sign = true;

	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
	}

	opt_attr.scale = hw_scale;
	opt_attr.bias = 0;

	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
}

/*
 * apply_front_porch_workaround TODO FPGA still need?
 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
 */
static void apply_front_porch_workaround(
	struct dc_crtc_timing *timing)
{
	if (timing->flags.INTERLACE == 1) {
		if (timing->v_front_porch < 2)
			timing->v_front_porch = 2;
	} else {
		if (timing->v_front_porch < 1)
			timing->v_front_porch = 1;
	}
}

/*
 * Compute the VUPDATE offset relative to VSYNC, in lines, from the stream's
 * CRTC timing (after the front-porch workaround is applied). Interlaced
 * timings double the blank-end distance. The result may be negative; callers
 * (e.g. dcn10_setup_vupdate_interrupt) check for that.
 */
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;

	/* Work on a copy so the stream's timing is not modified. */
	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	return asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
}

/*
 * Convert the (possibly negative) vupdate offset into a start/end line pair
 * wrapped into [0, v_total). end_line trails start_line by 2 lines (mod
 * v_total).
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (vupdate_pos >= 0)
		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
	else
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
	*end_line = (*start_line + 2) % timing->v_total;
}

/*
 * Compute the start/end lines for the stream's periodic interrupt, referenced
 * either to VUPDATE or to VSYNC per periodic_interrupt.ref_point.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos =
 pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
		/* Nudge the requested offset one line toward zero before
		 * rebasing it on the vupdate position.
		 */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Wrap into [0, v_total), same scheme as dcn10_calc_vupdate_position. */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
	} else
		ASSERT(0);
}

/*
 * Arm vertical interrupt 0 on the timing generator at the line window
 * computed for the stream's periodic interrupt.
 */
void dcn10_setup_periodic_interrupt(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	uint32_t start_line = 0;
	uint32_t end_line = 0;

	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);

	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}

/*
 * Arm vertical interrupt 2 at the vupdate position. A negative vupdate
 * offset is unexpected (asserts) and is clamped to line 0.
 */
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0) {
		ASSERT(0);
		start_line = 0;
	}

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
}

/*
 * Unblank the stream on its encoder (DP/eDP signals only) and, for eDP
 * local sinks, turn the backlight back on. YCbCr 4:2:0 halves the pixel
 * clock passed to the encoder.
 */
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
	}
}

/*
 * Send a custom SDP message immediately via the stream encoder.
 * Only meaningful for DP signals; otherwise a no-op.
 */
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
		const uint8_t *custom_sdp_message,
		unsigned int sdp_message_size)
{
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
				pipe_ctx->stream_res.stream_enc,
				custom_sdp_message,
				sdp_message_size);
	}
}

/*
 * Validate and apply a DISPCLK or DPPCLK request. The request is checked
 * against the clock manager's reported max/min and the bandwidth-required
 * floor before the current state's clock value is updated and update_clocks
 * is invoked. `stepping` is currently unused by this implementation.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
		context, clock_type, &clock_cfg);

	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	/* NOTE(review): "requirequired" reads like a typo but must match the
	 * field name declared in struct dc_clock_config — verify before renaming.
	 */
	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/*update internal request clock for update clock use*/
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
	else
		return DC_ERROR_UNEXPECTED;

	if
 (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
			context, true);
	return DC_OK;

}

/*
 * Query the clock manager for the configuration of the given clock type
 * against the current state. No-op if the clock manager or its get_clock
 * hook is absent; clock_cfg is left untouched in that case.
 */
void dcn10_get_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		struct dc_clock_config *clock_cfg)
{
	struct dc_state *context = dc->current_state;

	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);

}

/*
 * Report per-pipe DCC enable state. For each unblanked HUBP, after a fresh
 * hubp_read_state, dcc_en_bits[i] is set to 1 if DCC is enabled, else 0.
 * Entries for blanked pipes are left unmodified — callers must pre-initialize
 * the array.
 */
void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
{
	struct resource_pool *pool = dc->res_pool;
	int i;

	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en)
			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
	}
}