/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <linux/delay.h>

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dcn20/dcn20_resource.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dcn20/dcn20_dsc.h"
#endif
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
#include "hubp.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
#include "dcn10/dcn10_hubbub.h"
#include "dcn10/dcn10_optc.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"

#define DC_LOGGER_INIT(logger)

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

static void dcn20_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = 1; /* disable power gating */

	if (enable)
		force_on = 0;

	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
	if (REG(DOMAIN8_PG_CONFIG))
		REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
	if (REG(DOMAIN10_PG_CONFIG))
		REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);

	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
	if (REG(DOMAIN9_PG_CONFIG))
		REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
	if (REG(DOMAIN11_PG_CONFIG))
		REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);

	/* DCS0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
	if (REG(DOMAIN19_PG_CONFIG))
		REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
	if (REG(DOMAIN20_PG_CONFIG))
		REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
	if (REG(DOMAIN21_PG_CONFIG))
		REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
}

void dcn20_dccg_init(struct dce_hwseq *hws)
{
	/*
	 * set MICROSECOND_TIME_BASE_DIV
	 * 100Mhz refclk -> 0x120264
	 * 27Mhz refclk -> 0x12021b
	 * 48Mhz refclk -> 0x120230
	 *
	 */
	REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);

	/*
	 * set MILLISECOND_TIME_BASE_DIV
	 * 100Mhz refclk -> 0x1186a0
	 * 27Mhz refclk -> 0x106978
	 * 48Mhz refclk -> 0x10bb80
	 *
	 */
	REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);

	/* This value is dependent on the hardware pipeline delay so set once per SOC */
	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c);
}

void dcn20_display_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* RBBMIF
	 * disable RBBMIF timeout detection for all clients
	 * Ensure RBBMIF does not drop register accesses due to the per-client timeout
	 */
	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);

	/* DCCG */
	dcn20_dccg_init(hws);

	REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0);

	/* DCHUB/MMHUBBUB
	 * set global timer refclk divider
	 * 100Mhz refclk -> 2
	 * 27Mhz refclk -> 1
	 * 48Mhz refclk -> 1
	 */
	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
	REG_WRITE(REFCLK_CNTL, 0);

	/* OPTC
	 * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc
	 */

	/* AZ
	 * default value is 0x64 for 100Mhz ref clock; if the ref clock is 100Mhz there is no need to program this register,
	 * otherwise it should be programmed according to the ref clock
	 */
	REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64);
	/* Enable controller clock gating */
	REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1);
}

void dcn20_disable_vga(
	struct dce_hwseq *hws)
{
	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);
	REG_WRITE(D5VGA_CONTROL, 0);
	REG_WRITE(D6VGA_CONTROL, 0);
}

void dcn20_program_tripleBuffer(
	const struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	bool enableTripleBuffer)
{
	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
		pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
			pipe_ctx->plane_res.hubp,
			enableTripleBuffer);
	}
}

/* Blank pixel data during initialization */
void dcn20_init_blank(
		struct dc *dc,
		struct timing_generator *tg)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct output_pixel_processor *opp = NULL;
	struct output_pixel_processor *bottom_opp = NULL;
	uint32_t num_opps, opp_id_src0, opp_id_src1;
	uint32_t otg_active_width, otg_active_height;

	/* program opp dpg blank color */
	color_space = COLOR_SPACE_SRGB;
	color_space_to_black_color(dc, color_space, &black_color);

	/* get the OTG active size */
	tg->funcs->get_otg_active_size(tg,
			&otg_active_width,
			&otg_active_height);

	/* get the OPTC source */
	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
	opp = dc->res_pool->opps[opp_id_src0];

	if (num_opps == 2) {
		otg_active_width = otg_active_width / 2;
		ASSERT(opp_id_src1 < dc->res_pool->res_cap->num_opp);
		bottom_opp = dc->res_pool->opps[opp_id_src1];
	}

	opp->funcs->opp_set_disp_pattern_generator(
			opp,
			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
			COLOR_DEPTH_UNDEFINED,
			&black_color,
			otg_active_width,
			otg_active_height);

	if (num_opps == 2) {
		bottom_opp->funcs->opp_set_disp_pattern_generator(
				bottom_opp,
				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
				COLOR_DEPTH_UNDEFINED,
				&black_color,
				otg_active_width,
				otg_active_height);
	}

	dcn20_hwss_wait_for_blank_complete(opp);
}

#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
static void dcn20_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;

	if (REG(DOMAIN16_PG_CONFIG) == 0)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN16_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN16_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN17_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN17_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN18_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN18_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN19_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN19_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DSC4 */
		REG_UPDATE(DOMAIN20_PG_CONFIG,
				DOMAIN20_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN20_PG_STATUS,
				DOMAIN20_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DSC5 */
		REG_UPDATE(DOMAIN21_PG_CONFIG,
				DOMAIN21_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN21_PG_STATUS,
				DOMAIN21_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
#endif

static void dcn20_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DPP4 */
		REG_UPDATE(DOMAIN9_PG_CONFIG,
				DOMAIN9_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN9_PG_STATUS,
				DOMAIN9_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DPP5 */
		/*
		 * Do not power gate DPP5, should be left at HW default, power on permanently.
		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
		 * reset.
		 * REG_UPDATE(DOMAIN11_PG_CONFIG,
		 *		DOMAIN11_POWER_GATE, power_gate);
		 *
		 * REG_WAIT(DOMAIN11_PG_STATUS,
		 *		DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
		 *		1, 1000);
		 */
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}


static void dcn20_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DCHUBP4 */
		REG_UPDATE(DOMAIN8_PG_CONFIG,
				DOMAIN8_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN8_PG_STATUS,
				DOMAIN8_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DCHUBP5 */
		/*
		 * Do not power gate DCHUB5, should be left at HW default, power on permanently.
		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
		 * reset.
		 * REG_UPDATE(DOMAIN10_PG_CONFIG,
		 *		DOMAIN10_POWER_GATE, power_gate);
		 *
		 * REG_WAIT(DOMAIN10_PG_STATUS,
		 *		DOMAIN10_PGFSM_PWR_STATUS, pwr_status,
		 *		1, 1000);
		 */
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}


/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);

	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	dc->hwss.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}


void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	dcn20_plane_atomic_disable(dc, pipe_ctx);

	DC_LOG_DC("Power down front end %d\n",
			pipe_ctx->pipe_idx);
}

enum dc_status dcn20_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };

	/* The upper caller loop calls pipe0, the parent pipe, first. The back
	 * end is set up for pipe0; the other child pipes share pipe0's back
	 * end, so no programming is needed for them.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
		opp_cnt++;
	}

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);

	/* The HW programming guide assumes the display has already been
	 * disabled by the unplug sequence and that the OTG is stopped.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	dc->hwss.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp);

	params.vertical_total_min = stream->adjust.v_total_min;
	params.vertical_total_max = stream->adjust.v_total_max;
	params.vertical_total_mid = stream->adjust.v_total_mid;
	params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
			pipe_ctx->stream_res.tg, &params);

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		event_triggers = 0x80;
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}

void dcn20_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct mpc *mpc = dc->res_pool->mpc;
	enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;

	if (mpc->funcs->power_on_mpc_mem_pwr)
		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (mpc->funcs->set_output_csc != NULL)
			mpc->funcs->set_output_csc(mpc,
					opp_id,
					matrix,
					ocsc_mode);
	} else {
		if (mpc->funcs->set_ocsc_default != NULL)
			mpc->funcs->set_ocsc_default(mpc,
					opp_id,
					colorspace,
					ocsc_mode);
	}
}

bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	struct pwl_params *params = NULL;
	/*
	 * program OGAM only for the top pipe
	 * if there is a pipe split then fix diagnostic is required:
	 * how to pass OGAM parameter for stream.
	 * if programming for all pipes is required then remove the condition
	 * pipe_ctx->top_pipe == NULL, but then fix the diagnostic.
	 */
	if (mpc->funcs->power_on_mpc_mem_pwr)
		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
	if (pipe_ctx->top_pipe == NULL
			&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
		if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
			params = &stream->out_transfer_func->pwl;
		else if (pipe_ctx->stream->out_transfer_func->type ==
				TF_TYPE_DISTRIBUTED_POINTS &&
				cm_helper_translate_curve_to_hw_format(
					stream->out_transfer_func,
					&mpc->blender_params, false))
			params = &mpc->blender_params;
		/*
		 * there is no ROM
		 */
		if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
			BREAK_TO_DEBUGGER();
	}
	/*
	 * if the above block is not executed then 'params' equals NULL and
	 * output gamma is set to bypass
	 */
	mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return true;
}

bool dcn20_set_blend_lut(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	bool result = true;
	struct pwl_params *blend_lut = NULL;

	if (plane_state->blend_tf) {
		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
			blend_lut = &plane_state->blend_tf->pwl;
		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_hw_format(
					plane_state->blend_tf,
					&dpp_base->regamma_params, false);
			blend_lut = &dpp_base->regamma_params;
		}
	}
	result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);

	return result;
}

bool dcn20_set_shaper_3dlut(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	bool result = true;
	struct pwl_params *shaper_lut = NULL;

	if (plane_state->in_shaper_func) {
		if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
			shaper_lut = &plane_state->in_shaper_func->pwl;
		else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_hw_format(
					plane_state->in_shaper_func,
					&dpp_base->shaper_params, true);
			shaper_lut = &dpp_base->shaper_params;
		}
	}

	result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
	if (plane_state->lut3d_func &&
			plane_state->lut3d_func->state.bits.initialized == 1)
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
				&plane_state->lut3d_func->lut_3d);
	else
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);

	if (plane_state->lut3d_func &&
			plane_state->lut3d_func->state.bits.initialized == 1 &&
			plane_state->lut3d_func->hdr_multiplier != 0)
		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base,
				plane_state->lut3d_func->hdr_multiplier);
	else
		dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000);

	return result;
}

bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
					const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;
	bool use_degamma_ram = false;

	if (dpp_base == NULL || plane_state == NULL)
		return false;

	dcn20_set_shaper_3dlut(pipe_ctx, plane_state);
	dcn20_set_blend_lut(pipe_ctx, plane_state);

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	if (tf == NULL) {
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
		return true;
	}

	if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
		use_degamma_ram = true;

	if (use_degamma_ram == true) {
		if (tf->type == TF_TYPE_HWPWL)
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&tf->pwl);
		else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
		}
		return true;
	}

	/* handle here the optimized cases when de-gamma ROM could be used.
	 *
	 */
	if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS)
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	else {
		/*
		 * if we are here, we did not handle correctly.
		 * fix is required for this use case
		 */
		BREAK_TO_DEBUGGER();
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	}

	return result;
}

static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
		opp_cnt++;
	}

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
}

void dcn20_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space = stream->output_color_space;
	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
	struct pipe_ctx *odm_pipe;
	int odm_cnt = 1;

	int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;

	/* get opp dpg blank color */
	color_space_to_black_color(dc, color_space, &black_color);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		odm_cnt++;

	width = width / odm_cnt;

	if (blank) {
		if (stream_res->abm)
			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);

		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
	} else {
		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
	}

	stream_res->opp->funcs->opp_set_disp_pattern_generator(
			stream_res->opp,
			test_pattern,
			stream->timing.display_color_depth,
			&black_color,
			width,
			height);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
				odm_pipe->stream_res.opp,
				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
						CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
				stream->timing.display_color_depth,
				&black_color,
				width,
				height);
	}

	if (!blank)
		if (stream_res->abm) {
			stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
}


static void dcn20_power_on_plane(
	struct dce_hwseq *hws,
	struct pipe_ctx *pipe_ctx)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);
		dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
		dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
	}
}

void dcn20_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	//if (dc->debug.sanity_checks) {
	//	dcn10_verify_allow_pstate_change_high(dc);
	//}
	dcn20_power_on_plane(dc->hwseq, pipe_ctx);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* TODO: enable/disable in dm as per update type.
	if (plane_state) {
		DC_LOG_DC(dc->ctx->logger,
			"Pipe:%d 0x%x: addr hi:0x%x, "
			"addr low:0x%x, "
			"src: %d, %d, %d,"
			" %d; dst: %d, %d, %d, %d;\n",
			pipe_ctx->pipe_idx,
			plane_state,
			plane_state->address.grph.addr.high_part,
			plane_state->address.grph.addr.low_part,
			plane_state->src_rect.x,
			plane_state->src_rect.y,
			plane_state->src_rect.width,
			plane_state->src_rect.height,
			plane_state->dst_rect.x,
			plane_state->dst_rect.y,
			plane_state->dst_rect.width,
			plane_state->dst_rect.height);

		DC_LOG_DC(dc->ctx->logger,
			"Pipe %d: width, height, x, y format:%d\n"
			"viewport:%d, %d, %d, %d\n"
			"recout: %d, %d, %d, %d\n",
			pipe_ctx->pipe_idx,
			plane_state->format,
			pipe_ctx->plane_res.scl_data.viewport.width,
			pipe_ctx->plane_res.scl_data.viewport.height,
			pipe_ctx->plane_res.scl_data.viewport.x,
			pipe_ctx->plane_res.scl_data.viewport.y,
			pipe_ctx->plane_res.scl_data.recout.width,
			pipe_ctx->plane_res.scl_data.recout.height,
			pipe_ctx->plane_res.scl_data.recout.x,
			pipe_ctx->plane_res.scl_data.recout.y);
		print_rq_dlg_ttu(dc, pipe_ctx);
	}
	*/
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

//	if (dc->debug.sanity_checks) {
//		dcn10_verify_allow_pstate_change_high(dc);
//	}
}


void dcn20_pipe_control_lock_global(
		struct dc *dc,
		struct pipe_ctx *pipe,
		bool lock)
{
	if (lock) {
		pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
				pipe->stream_res.tg);
		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
	} else {
		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
				CRTC_STATE_VACTIVE);
		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
				CRTC_STATE_VBLANK);
		pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
				CRTC_STATE_VACTIVE);
		pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
				pipe->stream_res.tg);
	}
}

void dcn20_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	bool flip_immediate = false;

	/* use TG master update lock to lock everything on the TG
	 * therefore only top pipe need to lock
	 */
	if (pipe->top_pipe)
		return;

	if (pipe->plane_state != NULL)
		flip_immediate = pipe->plane_state->flip_immediate;

	if (flip_immediate && lock) {
		const int TIMEOUT_FOR_FLIP_PENDING = 100000;
		int i;

		for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
			if (!pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp))
				break;
			udelay(1);
		}

		if (pipe->bottom_pipe != NULL) {
			for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
				if (!pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp))
					break;
				udelay(1);
			}
		}
	}

	/* In flip immediate and pipe splitting case, we need to use GSL
	 * for synchronization. Only do setup on locking and on flip type change.
	 */
	if (lock && pipe->bottom_pipe != NULL)
		if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
		    (!flip_immediate && pipe->stream_res.gsl_group > 0))
			dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);

	if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
		if (lock)
			pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
		else
			pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
	} else {
		if (lock)
			pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		else
			pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
	}
}

static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
	new_pipe->update_flags.raw = 0;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}
	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect top pipe only changes */
	if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
		/* Detect odm changes */
		if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
				&& old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
				|| (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
				|| (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
				|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
			new_pipe->update_flags.bits.odm = 1;

		/* Detect global sync changes */
		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
				|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
				|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
			new_pipe->update_flags.bits.global_sync = 1;
	}

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/* Detect mpcc blending changes, only dpp inst and bot matter here */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp
			|| (!old_pipe->bottom_pipe && new_pipe->bottom_pipe)
			|| (old_pipe->bottom_pipe && !new_pipe->bottom_pipe)
			|| (old_pipe->bottom_pipe && new_pipe->bottom_pipe
				&& old_pipe->bottom_pipe->plane_res.mpcc_inst
					!= new_pipe->bottom_pipe->plane_res.mpcc_inst))
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
		struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;

		/* Detect pipe interdependent updates */
		if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
				old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
				old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
				old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
				old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
				old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
				old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
				old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
				old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
				old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
				old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
			old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
			old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
			old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
			old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
			old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
			old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
			old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
			old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
			old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
			old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
			old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
				memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
				memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}
}

static void dcn20_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (pipe_ctx->update_flags.bits.dppclk)
		dpp->funcs->dpp_dppclk_control(dpp, false, true);

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is a common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
	}
	if (pipe_ctx->update_flags.bits.hubp_interdependent)
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);

	if (pipe_ctx->update_flags.bits.enable ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.input_csc_change ||
			plane_state->update_flags.bits.color_space_change ||
			plane_state->update_flags.bits.coeff_reduction_change) {
		struct dc_bias_and_scale bns_params = {0};

		// program the input csc
		dpp->funcs->dpp_setup(dpp,
				plane_state->format,
				EXPANSION_MODE_ZERO,
				plane_state->input_csc_color_matrix,
				plane_state->color_space,
				NULL);

		if (dpp->funcs->dpp_program_bias_and_scale) {
			//TODO :for CNVC set scale and bias registers if necessary
			dcn10_build_prescale_params(&bns_params, plane_state);
			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
		}
	}

	if (pipe_ctx->update_flags.bits.mpcc
			|| plane_state->update_flags.bits.global_alpha_change
			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
		/* Need mpcc to be idle if changing opp */
		if (pipe_ctx->update_flags.bits.opp_changed) {
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
			int mpcc_inst;

			for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
				if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst])
					continue;
				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
				old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			}
		}
		dc->hwss.update_mpcc(dc, pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.scaler ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			pipe_ctx->stream->update_flags.bits.scaling) {
		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
		/* scaler configuration */
		pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
	}

	if (pipe_ctx->update_flags.bits.viewport ||
			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling))
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);

	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed)
			&& pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	/* Any updates are handled in dc interface, just need
	 * to apply existing for plane enable / opp change */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
			|| pipe_ctx->stream->update_flags.bits.gamut_remap
			|| pipe_ctx->stream->update_flags.bits.out_csc) {
		/* dpp/cm gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		/*call the dcn2 method which uses mpc csc*/
		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				hubp->opp_id);
	}

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.opp_changed ||
			plane_state->update_flags.bits.pixel_format_change ||
			plane_state->update_flags.bits.horizontal_mirror_change ||
			plane_state->update_flags.bits.rotation_change ||
			plane_state->update_flags.bits.swizzle_change ||
			plane_state->update_flags.bits.dcc_change ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.plane_size_change) {
		struct plane_size size = plane_state->plane_size;

		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			0);
		hubp->power_gated = false;
	}

	if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update)
		dc->hwss.update_plane_addr(dc, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable)
		hubp->funcs->set_blank(hubp, false);
}


static void dcn20_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	/* Only need to unblank on top pipe */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
			&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
		dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);

	if (pipe_ctx->update_flags.bits.global_sync) {
		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		if (dc->hwss.setup_vupdate_interrupt)
			dc->hwss.setup_vupdate_interrupt(pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.odm)
		dc->hwss.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable)
		dcn20_enable_plane(dc, pipe_ctx, context);

	if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->update_flags.bits.enable
			|| pipe_ctx->plane_state->update_flags.bits.sdr_white_level)
		set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interaction between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
			|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}
}

static bool does_pipe_need_lock(struct pipe_ctx *pipe)
{
	if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
			|| pipe->update_flags.raw)
		return true;
	if (pipe->bottom_pipe)
		return does_pipe_need_lock(pipe->bottom_pipe);

	return false;
}

static void dcn20_program_front_end_for_ctx(
		struct dc *dc,
		struct dc_state *context)
{
	const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
	int i;
	bool pipe_locked[MAX_PIPES] = {false};
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Carry over GSL groups in case the context is changing. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
			context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
				dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
				&context->res_ctx.pipe_ctx[i]);
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (!context->res_ctx.pipe_ctx[i].top_pipe &&
				does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
			if (!pipe_ctx->update_flags.bits.enable)
				dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
			pipe_locked[i] = true;
		}

	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				&& !context->res_ctx.pipe_ctx[i].top_pipe
				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
				&& context->res_ctx.pipe_ctx[i].stream)
			dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);

	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			while (pipe) {
				dcn20_program_pipe(dc, pipe, context);
				pipe = pipe->bottom_pipe;
			}
			/* Program secondary blending tree and writeback pipes */
			pipe = &context->res_ctx.pipe_ctx[i];
			if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0
					&& (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw)
					&& dc->hwss.program_all_writeback_pipes_in_tree)
				dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
		}
	}

	/* Unlock all locked pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (pipe_locked[i]) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
				dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
			if (!pipe_ctx->update_flags.bits.enable)
				dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
		}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/*
	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
	 * part of the enable operation; otherwise, DM may request an immediate flip which
	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
	 * is unsupported on DCN.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) {
			struct hubp *hubp = pipe->plane_res.hubp;
			int j = 0;

			for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS
					&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
				msleep(1);
		}
	}

	/* WA to apply WM setting*/
	if (dc->hwseq->wa.DEGVIDCN21)
		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
}


void dcn20_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					false);
}

void dcn20_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
}

bool dcn20_update_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	/* recalculate DML parameters */
	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false))
		return false;

	/* apply updated bandwidth parameters */
	dc->hwss.prepare_bandwidth(dc, context);

	/* update hubp configs for all pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->plane_state == NULL)
			continue;

		if (pipe_ctx->top_pipe == NULL) {
			bool blank = !is_pipe_tree_visible(pipe_ctx);

			pipe_ctx->stream_res.tg->funcs->program_global_sync(
					pipe_ctx->stream_res.tg,
					pipe_ctx->pipe_dlg_param.vready_offset,
					pipe_ctx->pipe_dlg_param.vstartup_start,
					pipe_ctx->pipe_dlg_param.vupdate_offset,
					pipe_ctx->pipe_dlg_param.vupdate_width);

			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

			if (pipe_ctx->prev_odm_pipe == NULL)
				dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);

			if (dc->hwss.setup_vupdate_interrupt)
				dc->hwss.setup_vupdate_interrupt(pipe_ctx);
		}

		pipe_ctx->plane_res.hubp->funcs->hubp_setup(
				pipe_ctx->plane_res.hubp,
				&pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs,
				&pipe_ctx->rq_regs,
				&pipe_ctx->pipe_dlg_param);
	}

	return true;
}

static void dcn20_enable_writeback(
		struct dc *dc,
		const struct dc_stream_status *stream_status,
		struct dc_writeback_info *wb_info,
		struct dc_state *context)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;
	struct timing_generator *optc;

	ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES);
	ASSERT(wb_info->wb_enabled);
	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* set the OPTC source mux */
	ASSERT(stream_status->primary_otg_inst < MAX_PIPES);
	optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst];
	optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst);
	/* set MCIF_WB buffer and arbitration configuration */
	mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
	mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
	/* Enable MCIF_WB */
	mcif_wb->funcs->enable_mcif(mcif_wb);
	/* Enable DWB */
	dwb->funcs->enable(dwb, &wb_info->dwb_params);
	/* TODO: add sequence to enable/disable warmup */
}

void dcn20_disable_writeback(
		struct dc *dc,
		unsigned int dwb_pipe_inst)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	ASSERT(dwb_pipe_inst < MAX_DWB_PIPES);
	dwb = dc->res_pool->dwbc[dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst];

	dwb->funcs->disable(dwb);
	mcif_wb->funcs->disable_mcif(mcif_wb);
}

bool dcn20_hwss_wait_for_blank_complete(
		struct output_pixel_processor *opp)
{
	int counter;

	for (counter = 0; counter < 1000; counter++) {
		if (opp->funcs->dpg_is_blanked(opp))
			break;

		udelay(100);
	}

	if (counter == 1000) {
		dm_error("DC: failed to blank crtc!\n");
		return false;
	}

	return true;
}

bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;

	if (!hubp)
		return false;
	return hubp->funcs->dmdata_status_done(hubp);
}

static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

		dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true);
		while (odm_pipe) {
			dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true);
			odm_pipe = odm_pipe->next_odm_pipe;
		}
	}
#endif
}

static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

		dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false);
		while (odm_pipe) {
			dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false);
			odm_pipe = odm_pipe->next_odm_pipe;
		}
	}
#endif
}

void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
{
	struct dc_dmdata_attributes attr = { 0 };
	struct hubp *hubp = pipe_ctx->plane_res.hubp;

	attr.dmdata_mode = DMDATA_HW_MODE;
	attr.dmdata_size =
		dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
	attr.address.quad_part =
			pipe_ctx->stream->dmdata_address.quad_part;
	attr.dmdata_dl_delta = 0;
	attr.dmdata_qos_mode = 0;
	attr.dmdata_qos_level = 0;
	attr.dmdata_repeat = 1; /* always repeat */
	attr.dmdata_updated = 1;
	attr.dmdata_sw_data = NULL;

	hubp->funcs->dmdata_set_attributes(hubp, &attr);
}

void dcn20_disable_stream(struct pipe_ctx *pipe_ctx)
{
	dce110_disable_stream(pipe_ctx);
}

static void dcn20_init_vm_ctx(
		struct dce_hwseq *hws,
		struct dc *dc,
		struct dc_virtual_addr_space_config *va_config,
		int vmid)
{
	struct dcn_hubbub_virt_addr_config config;

	if (vmid == 0) {
		ASSERT(0); /* VMID cannot be 0 for vm context */
		return;
	}

	config.page_table_start_addr = va_config->page_table_start_addr;
	config.page_table_end_addr = va_config->page_table_end_addr;
	config.page_table_block_size = va_config->page_table_block_size_in_bytes;
	config.page_table_depth = va_config->page_table_depth;
	config.page_table_base_addr = va_config->page_table_base_addr;

	dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid);
}

static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config)
{
	struct dcn_hubbub_phys_addr_config config;

	config.system_aperture.fb_top = pa_config->system_aperture.fb_top;
	config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset;
	config.system_aperture.fb_base = pa_config->system_aperture.fb_base;
	config.system_aperture.agp_top = pa_config->system_aperture.agp_top;
	config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot;
	config.system_aperture.agp_base = pa_config->system_aperture.agp_base;
	config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr;
	config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr;
	config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
	config.page_table_default_page_addr = pa_config->page_table_default_page_addr;

	return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config);
}

static bool patch_address_for_sbs_tb_stereo(
		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool sec_split = pipe_ctx->top_pipe &&
			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;

	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
			(pipe_ctx->stream->timing.timing_3d_format ==
					TIMING_3D_FORMAT_SIDE_BY_SIDE ||
			pipe_ctx->stream->timing.timing_3d_format ==
					TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
		*addr = plane_state->address.grph_stereo.left_addr;
		plane_state->address.grph_stereo.left_addr =
				plane_state->address.grph_stereo.right_addr;
		return true;
	}

	if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
		plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
		plane_state->address.grph_stereo.right_addr =
				plane_state->address.grph_stereo.left_addr;
	}
	return false;
}


static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	// Call Helper to track VMID use
	vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}

void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = { { 0 } };
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct pipe_ctx *odm_pipe;

	params.opp_cnt = 1;
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		params.opp_cnt++;
	}
	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		link->dc->hwss.edp_backlight_control(link, true);
	}
}

void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0)
		start_line = 0;

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
}

static void dcn20_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		/* DPMS may have already disabled the stream */
		if (!pipe_ctx->stream->dpms_off)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		/* free acquired resources */
		if (pipe_ctx->stream_res.audio) {
			/* disable az_endpoint */
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/* free audio */
			if (dc->caps.dynamic_audio == true) {
				/* we have to dynamically arbitrate the audio endpoints;
				 * freeing the resource also resets is_audio_acquired
				 */
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	else if (pipe_ctx->stream_res.dsc) {
		dp_set_dsc_enable(pipe_ctx, false);
	}
#endif

	/* The caller's loop resets the parent pipe (pipe 0) last. The back end
	 * is shared by all pipes and is only disabled when the parent pipe is
	 * disabled.
	 */
	if (pipe_ctx->top_pipe == NULL) {
		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}

static void dcn20_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	/* Reset Back End */
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (dc->hwss.enable_stream_gating)
				dc->hwss.enable_stream_gating(dc, pipe_ctx);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}

void dcn20_get_mpctree_visual_confirm_color(
		struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	const struct tg_color pipe_colors[6] = {
			{MAX_TG_COLOR_VALUE, 0, 0}, // red
			{MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
			{0, MAX_TG_COLOR_VALUE, 0}, // blue
			{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
			{0, 0, MAX_TG_COLOR_VALUE}, // green
			{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
	};

	struct pipe_ctx *top_pipe = pipe_ctx;

	while (top_pipe->top_pipe) {
		top_pipe = top_pipe->top_pipe;
	}

	*color = pipe_colors[top_pipe->pipe_idx];
}

static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = { {0} };
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	// input to MPCC is always RGB, by default leave black_color at 0
	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
		dcn10_get_hdr_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
		dcn10_get_surface_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
		dcn20_get_mpctree_visual_confirm_color(
				pipe_ctx, &blnd_cfg.black_color);
	}

	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	blnd_cfg.background_color_bpc = 4;
	blnd_cfg.bottom_gain_mode = 0;
	blnd_cfg.top_gain = 0x1f000;
	blnd_cfg.bottom_inside_gain = 0x1f000;
	blnd_cfg.bottom_outside_gain = 0x1f000;
	blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This causes us to pick mpcc1 here,
	 * which causes a pstate hang for a yet unknown reason.
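	 * For now, mpcc_id below is simply kept tied 1:1 to the HUBP instance.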
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree */
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}

static int find_free_gsl_group(const struct dc *dc)
{
	if (dc->res_pool->gsl_groups.gsl_0 == 0)
		return 1;
	if (dc->res_pool->gsl_groups.gsl_1 == 0)
		return 2;
	if (dc->res_pool->gsl_groups.gsl_2 == 0)
		return 3;

	return 0;
}

/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
 * This is only used to lock pipes in pipe splitting case with immediate flip
 * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
 * so we get tearing with freesync since we cannot flip multiple pipes
 * atomically.
 * We use GSL for this:
 * - immediate flip: find first available GSL group if not already assigned,
 *   program gsl with that group, set current OTG as master
 *   and always use 0x4 = AND of flip_ready from all pipes
 * - vsync flip: disable GSL if used
 *
 * Groups in stream_res are stored as +1 from HW registers, i.e.
 * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
 * Using a magic value like -1 would require tracking all inits/resets
 */
void dcn20_setup_gsl_group_as_lock(
		const struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool enable)
{
	struct gsl_params gsl;
	int group_idx;

	memset(&gsl, 0, sizeof(struct gsl_params));

	if (enable) {
		/* return if group already assigned since GSL was set up
		 * for vsync flip, we would unassign so it can't be "left over"
		 */
		if (pipe_ctx->stream_res.gsl_group > 0)
			return;

		group_idx = find_free_gsl_group(dc);
		ASSERT(group_idx != 0);
		pipe_ctx->stream_res.gsl_group = group_idx;

		/* set gsl group reg field and mark resource used */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 1;
			dc->res_pool->gsl_groups.gsl_0 = 1;
			break;
		case 2:
			gsl.gsl1_en = 1;
			dc->res_pool->gsl_groups.gsl_1 = 1;
			break;
		case 3:
			gsl.gsl2_en = 1;
			dc->res_pool->gsl_groups.gsl_2 = 1;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return; // invalid case
		}
		gsl.gsl_master_en = 1;
	} else {
		group_idx = pipe_ctx->stream_res.gsl_group;
		if (group_idx == 0)
			return; // if not in use, just return

		pipe_ctx->stream_res.gsl_group = 0;

		/* unset gsl group reg field and mark resource free */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 0;
			dc->res_pool->gsl_groups.gsl_0 = 0;
			break;
		case 2:
			gsl.gsl1_en = 0;
			dc->res_pool->gsl_groups.gsl_1 = 0;
			break;
		case 3:
			gsl.gsl2_en = 0;
			dc->res_pool->gsl_groups.gsl_2 = 0;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return;
		}
		gsl.gsl_master_en = 0;
	}

	/* at this point we want to program whether it's to enable or disable */
	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
		pipe_ctx->stream_res.tg->funcs->set_gsl(
			pipe_ctx->stream_res.tg,
			&gsl);

		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
			pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
	} else
		BREAK_TO_DEBUGGER();
}

static void dcn20_set_flip_control_gsl(
		struct pipe_ctx *pipe_ctx,
		bool flip_immediate)
{
	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
				pipe_ctx->plane_res.hubp, flip_immediate);
}

static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
	enum dc_lane_count lane_count =
		pipe_ctx->stream->link->cur_link_settings.lane_count;

	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link *link = pipe_ctx->stream->link;

	uint32_t active_total_with_borders;
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	/* For MST, multiple streams can go over a single link.
	 * Connect the DIG back end to the front end in enable_stream and
	 * disconnect them in disable_stream.
	 * This keeps the stream and link logic cleanly separated.
	 */
	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
						    pipe_ctx->stream_res.stream_enc->id, true);

	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (link->dc->hwss.program_dmdata_engine)
			link->dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	link->dc->hwss.update_info_frame(pipe_ctx);

	/* enable early control to avoid corruption on DP monitor */
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	if (lane_count != 0)
		early_control = active_total_with_borders % lane_count;

	if (early_control == 0)
		early_control = lane_count;

	tg->funcs->set_early_control(tg, early_control);

	/* enable audio only within mode set */
	if (pipe_ctx->stream_res.audio != NULL) {
		if (dc_is_dp_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
	}
}

static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	bool enable = false;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
	enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
					? dmdata_dp
					: dmdata_hdmi;

	/* if using dynamic meta, don't set up generic infopackets */
	if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
		pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
		enable = true;
	}

	if (!hubp)
		return;

	if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata)
		return;

	stream_enc->funcs->set_dynamic_metadata(stream_enc, enable,
						hubp->inst, mode);
}

static void dcn20_fpga_init_hw(struct dc *dc)
{
	int i, j;
	struct dce_hwseq *hws = dc->hwseq;
	struct resource_pool *res_pool = dc->res_pool;
	struct dc_state *context = dc->current_state;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Enable ability to power gate / don't force power on permanently
	dc->hwss.enable_power_gating_plane(hws, true);

	// Specific to FPGA dccg and registers
	REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
	REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);

	dcn20_dccg_init(hws);

	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
	REG_WRITE(REFCLK_CNTL, 0);

	/* Blank pixel data with OPP DPG */
	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg->funcs->is_tg_enabled(tg))
			dcn20_init_blank(dc, tg);
	}

	for (i = 0; i < res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->lock(tg);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dpp *dpp = res_pool->dpps[i];

		dpp->funcs->dpp_reset(dpp);
	}

	/* Reset all MPCC muxes */
	res_pool->mpc->funcs->mpc_init(res_pool->mpc);

	/* initialize OPP mpc_tree parameter */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
		res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		for (j = 0; j < MAX_PIPES; j++)
			res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;
		pipe_ctx->stream_res.opp = NULL;

		hubp->funcs->hubp_init(hubp);

		//dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		//dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
		/* to do */
		hwss1_plane_atomic_disconnect(dc, pipe_ctx);
	}

	/* initialize DWB pointer to MCIF_WB */
	for (i = 0; i < res_pool->res_cap->num_dwb; i++)
		res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];

	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;
	}

	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		tg->funcs->tg_init(tg);
	}
}

void dcn20_hw_sequencer_construct(struct dc *dc)
{
	dcn10_hw_sequencer_construct(dc);
	dc->hwss.unblank_stream = dcn20_unblank_stream;
	dc->hwss.update_plane_addr = dcn20_update_plane_addr;
	dc->hwss.enable_stream_timing = dcn20_enable_stream_timing;
	dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer;
	dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func;
	dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func;
	dc->hwss.apply_ctx_for_surface = NULL;
	dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx;
	dc->hwss.pipe_control_lock = dcn20_pipe_control_lock;
	dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global;
	dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth;
	dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth;
	dc->hwss.update_bandwidth = dcn20_update_bandwidth;
	dc->hwss.enable_writeback = dcn20_enable_writeback;
	dc->hwss.disable_writeback = dcn20_disable_writeback;
	dc->hwss.program_output_csc = dcn20_program_output_csc;
	dc->hwss.update_odm = dcn20_update_odm;
	dc->hwss.blank_pixel_data = dcn20_blank_pixel_data;
	dc->hwss.dmdata_status_done = dcn20_dmdata_status_done;
	dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine;
	dc->hwss.enable_stream = dcn20_enable_stream;
	dc->hwss.disable_stream = dcn20_disable_stream;
	dc->hwss.init_sys_ctx = dcn20_init_sys_ctx;
	dc->hwss.init_vm_ctx = dcn20_init_vm_ctx;
	dc->hwss.disable_stream_gating = dcn20_disable_stream_gating;
	dc->hwss.enable_stream_gating = dcn20_enable_stream_gating;
	dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt;
	dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap;
	dc->hwss.update_mpcc = dcn20_update_mpcc;
	dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl;
	dc->hwss.init_blank = dcn20_init_blank;
	dc->hwss.disable_plane = dcn20_disable_plane;
	dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable;
	dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane;
	dc->hwss.dpp_pg_control = dcn20_dpp_pg_control;
	dc->hwss.hubp_pg_control = dcn20_hubp_pg_control;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
	dc->hwss.dsc_pg_control = dcn20_dsc_pg_control;
#else
	dc->hwss.dsc_pg_control = NULL;
#endif
	dc->hwss.disable_vga = dcn20_disable_vga;

	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		dc->hwss.init_hw = dcn20_fpga_init_hw;
		dc->hwss.init_pipes = NULL;
	}
}