1 /* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 #include <linux/delay.h> 26 27 #include "dm_services.h" 28 #include "dm_helpers.h" 29 #include "core_types.h" 30 #include "resource.h" 31 #include "dcn20/dcn20_resource.h" 32 #include "dce110/dce110_hw_sequencer.h" 33 #include "dcn10/dcn10_hw_sequencer.h" 34 #include "dcn20_hwseq.h" 35 #include "dce/dce_hwseq.h" 36 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 37 #include "dcn20/dcn20_dsc.h" 38 #endif 39 #include "abm.h" 40 #include "clk_mgr.h" 41 #include "dmcu.h" 42 #include "hubp.h" 43 #include "timing_generator.h" 44 #include "opp.h" 45 #include "ipp.h" 46 #include "mpc.h" 47 #include "mcif_wb.h" 48 #include "reg_helper.h" 49 #include "dcn10/dcn10_cm_common.h" 50 #include "dcn10/dcn10_hubbub.h" 51 #include "dcn10/dcn10_optc.h" 52 #include "dc_link_dp.h" 53 #include "vm_helper.h" 54 #include "dccg.h" 55 56 #define DC_LOGGER_INIT(logger) 57 58 #define CTX \ 59 hws->ctx 60 #define REG(reg)\ 61 hws->regs->reg 62 63 #undef FN 64 #define FN(reg_name, field_name) \ 65 hws->shifts->field_name, hws->masks->field_name 66 67 static void dcn20_enable_power_gating_plane( 68 struct dce_hwseq *hws, 69 bool enable) 70 { 71 bool force_on = 1; /* disable power gating */ 72 73 if (enable) 74 force_on = 0; 75 76 /* DCHUBP0/1/2/3/4/5 */ 77 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); 78 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on); 79 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on); 80 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on); 81 if (REG(DOMAIN8_PG_CONFIG)) 82 REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); 83 if (REG(DOMAIN10_PG_CONFIG)) 84 REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); 85 86 /* DPP0/1/2/3/4/5 */ 87 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on); 88 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on); 89 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on); 90 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); 91 if (REG(DOMAIN9_PG_CONFIG)) 92 REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); 93 if (REG(DOMAIN11_PG_CONFIG)) 94 REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); 95 96 /* DCS0/1/2/3/4/5 */ 97 REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on); 98 REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on); 99 REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on); 100 if (REG(DOMAIN19_PG_CONFIG)) 101 REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on); 102 if (REG(DOMAIN20_PG_CONFIG)) 103 REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on); 104 if (REG(DOMAIN21_PG_CONFIG)) 105 REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); 106 } 107 108 void dcn20_dccg_init(struct dce_hwseq *hws) 109 { 110 /* 111 * set MICROSECOND_TIME_BASE_DIV 112 * 100Mhz refclk -> 0x120264 113 * 27Mhz refclk -> 0x12021b 114 * 48Mhz refclk -> 0x120230 115 * 116 */ 117 REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264); 118 119 /* 120 * set MILLISECOND_TIME_BASE_DIV 121 * 100Mhz refclk -> 0x1186a0 122 * 27Mhz refclk -> 0x106978 123 * 48Mhz refclk -> 0x10bb80 124 * 125 */ 126 REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0); 127 128 /* This value is dependent on the hardware pipeline delay so set once per SOC */ 129 REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c); 130 } 131 void dcn20_display_init(struct dc *dc) 132 { 133 struct dce_hwseq *hws = dc->hwseq; 134 135 /* RBBMIF 136 * disable RBBMIF timeout detection for all clients 137 * Ensure RBBMIF does not drop register accesses due to the per-client timeout 138 */ 139 REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); 140 REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); 141 142 /* DCCG */ 143 dcn20_dccg_init(hws); 144 145 REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 0); 146 147 /* DCHUB/MMHUBBUB 148 * set global timer refclk divider 149 * 100Mhz refclk -> 2 150 * 27Mhz refclk -> 1 151 * 48Mhz refclk -> 1 152 */ 153 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); 154 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); 155 REG_WRITE(REFCLK_CNTL, 0); 156 157 /* OPTC 158 * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc 159 */ 160 161 /* AZ 162 * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, 163 * if not, it should be programmed according to the ref clock 164 */ 165 REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); 166 /* Enable controller clock gating */ 167 REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); 168 } 169 170 void dcn20_disable_vga( 171 struct dce_hwseq *hws) 172 { 173 REG_WRITE(D1VGA_CONTROL, 0); 174 REG_WRITE(D2VGA_CONTROL, 0); 175 REG_WRITE(D3VGA_CONTROL, 0); 176 REG_WRITE(D4VGA_CONTROL, 0); 177 REG_WRITE(D5VGA_CONTROL, 0); 178 REG_WRITE(D6VGA_CONTROL, 0); 179 } 180 181 void dcn20_program_tripleBuffer( 182 const struct dc *dc, 183 struct pipe_ctx *pipe_ctx, 184 bool enableTripleBuffer) 185 { 186 if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { 187 pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( 188 pipe_ctx->plane_res.hubp, 189 enableTripleBuffer); 190 } 191 } 192 193 /* Blank pixel data during initialization */ 194 void dcn20_init_blank( 195 struct dc *dc, 196 struct timing_generator *tg) 197 { 198 enum dc_color_space color_space; 199 struct tg_color black_color = {0}; 200 struct output_pixel_processor *opp = NULL; 201 struct output_pixel_processor *bottom_opp = NULL; 202 uint32_t num_opps, opp_id_src0, opp_id_src1; 203 uint32_t otg_active_width, otg_active_height; 204 205 /* program opp dpg blank color */ 206 color_space = COLOR_SPACE_SRGB; 207 color_space_to_black_color(dc, color_space, &black_color); 208 209 /* get the OTG active size */ 210 tg->funcs->get_otg_active_size(tg, 211 &otg_active_width, 212 &otg_active_height); 213 214 /* get the OPTC source */ 215 tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); 216 ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); 217 opp = dc->res_pool->opps[opp_id_src0]; 218 219 if (num_opps == 2) { 220 otg_active_width = otg_active_width / 2; 221 ASSERT(opp_id_src1 < dc->res_pool->res_cap->num_opp); 222 bottom_opp = dc->res_pool->opps[opp_id_src1]; 223 } 224 225 opp->funcs->opp_set_disp_pattern_generator( 226 opp, 227 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, 228 COLOR_DEPTH_UNDEFINED, 229 &black_color, 230 otg_active_width, 231 otg_active_height); 232 233 if (num_opps == 2) { 234 bottom_opp->funcs->opp_set_disp_pattern_generator( 235 bottom_opp, 236 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, 237 COLOR_DEPTH_UNDEFINED, 238 &black_color, 239 otg_active_width, 240 otg_active_height); 241 } 242 243 dcn20_hwss_wait_for_blank_complete(opp); 244 } 245 246 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 247 static void dcn20_dsc_pg_control( 248 struct dce_hwseq *hws, 249 unsigned int dsc_inst, 250 bool power_on) 251 { 252 uint32_t power_gate = power_on ? 0 : 1; 253 uint32_t pwr_status = power_on ? 0 : 2; 254 uint32_t org_ip_request_cntl = 0; 255 256 if (hws->ctx->dc->debug.disable_dsc_power_gate) 257 return; 258 259 if (REG(DOMAIN16_PG_CONFIG) == 0) 260 return; 261 262 REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); 263 if (org_ip_request_cntl == 0) 264 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); 265 266 switch (dsc_inst) { 267 case 0: /* DSC0 */ 268 REG_UPDATE(DOMAIN16_PG_CONFIG, 269 DOMAIN16_POWER_GATE, power_gate); 270 271 REG_WAIT(DOMAIN16_PG_STATUS, 272 DOMAIN16_PGFSM_PWR_STATUS, pwr_status, 273 1, 1000); 274 break; 275 case 1: /* DSC1 */ 276 REG_UPDATE(DOMAIN17_PG_CONFIG, 277 DOMAIN17_POWER_GATE, power_gate); 278 279 REG_WAIT(DOMAIN17_PG_STATUS, 280 DOMAIN17_PGFSM_PWR_STATUS, pwr_status, 281 1, 1000); 282 break; 283 case 2: /* DSC2 */ 284 REG_UPDATE(DOMAIN18_PG_CONFIG, 285 DOMAIN18_POWER_GATE, power_gate); 286 287 REG_WAIT(DOMAIN18_PG_STATUS, 288 DOMAIN18_PGFSM_PWR_STATUS, pwr_status, 289 1, 1000); 290 break; 291 case 3: /* DSC3 */ 292 REG_UPDATE(DOMAIN19_PG_CONFIG, 293 DOMAIN19_POWER_GATE, power_gate); 294 295 REG_WAIT(DOMAIN19_PG_STATUS, 296 DOMAIN19_PGFSM_PWR_STATUS, pwr_status, 297 1, 1000); 298 break; 299 case 4: /* DSC4 */ 300 REG_UPDATE(DOMAIN20_PG_CONFIG, 301 DOMAIN20_POWER_GATE, power_gate); 302 303 REG_WAIT(DOMAIN20_PG_STATUS, 304 DOMAIN20_PGFSM_PWR_STATUS, pwr_status, 305 1, 1000); 306 break; 307 case 5: /* DSC5 */ 308 REG_UPDATE(DOMAIN21_PG_CONFIG, 309 DOMAIN21_POWER_GATE, power_gate); 310 311 REG_WAIT(DOMAIN21_PG_STATUS, 312 DOMAIN21_PGFSM_PWR_STATUS, pwr_status, 313 1, 1000); 314 break; 315 default: 316 BREAK_TO_DEBUGGER(); 317 break; 318 } 319 320 if (org_ip_request_cntl == 0) 321 REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); 322 } 323 #endif 324 325 static void dcn20_dpp_pg_control( 326 struct dce_hwseq *hws, 327 unsigned int dpp_inst, 328 bool power_on) 329 { 330 uint32_t power_gate = power_on ? 0 : 1; 331 uint32_t pwr_status = power_on ? 0 : 2; 332 333 if (hws->ctx->dc->debug.disable_dpp_power_gate) 334 return; 335 if (REG(DOMAIN1_PG_CONFIG) == 0) 336 return; 337 338 switch (dpp_inst) { 339 case 0: /* DPP0 */ 340 REG_UPDATE(DOMAIN1_PG_CONFIG, 341 DOMAIN1_POWER_GATE, power_gate); 342 343 REG_WAIT(DOMAIN1_PG_STATUS, 344 DOMAIN1_PGFSM_PWR_STATUS, pwr_status, 345 1, 1000); 346 break; 347 case 1: /* DPP1 */ 348 REG_UPDATE(DOMAIN3_PG_CONFIG, 349 DOMAIN3_POWER_GATE, power_gate); 350 351 REG_WAIT(DOMAIN3_PG_STATUS, 352 DOMAIN3_PGFSM_PWR_STATUS, pwr_status, 353 1, 1000); 354 break; 355 case 2: /* DPP2 */ 356 REG_UPDATE(DOMAIN5_PG_CONFIG, 357 DOMAIN5_POWER_GATE, power_gate); 358 359 REG_WAIT(DOMAIN5_PG_STATUS, 360 DOMAIN5_PGFSM_PWR_STATUS, pwr_status, 361 1, 1000); 362 break; 363 case 3: /* DPP3 */ 364 REG_UPDATE(DOMAIN7_PG_CONFIG, 365 DOMAIN7_POWER_GATE, power_gate); 366 367 REG_WAIT(DOMAIN7_PG_STATUS, 368 DOMAIN7_PGFSM_PWR_STATUS, pwr_status, 369 1, 1000); 370 break; 371 case 4: /* DPP4 */ 372 REG_UPDATE(DOMAIN9_PG_CONFIG, 373 DOMAIN9_POWER_GATE, power_gate); 374 375 REG_WAIT(DOMAIN9_PG_STATUS, 376 DOMAIN9_PGFSM_PWR_STATUS, pwr_status, 377 1, 1000); 378 break; 379 case 5: /* DPP5 */ 380 /* 381 * Do not power gate DPP5, should be left at HW default, power on permanently. 382 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard 383 * reset. 384 * REG_UPDATE(DOMAIN11_PG_CONFIG, 385 * DOMAIN11_POWER_GATE, power_gate); 386 * 387 * REG_WAIT(DOMAIN11_PG_STATUS, 388 * DOMAIN11_PGFSM_PWR_STATUS, pwr_status, 389 * 1, 1000); 390 */ 391 break; 392 default: 393 BREAK_TO_DEBUGGER(); 394 break; 395 } 396 } 397 398 399 static void dcn20_hubp_pg_control( 400 struct dce_hwseq *hws, 401 unsigned int hubp_inst, 402 bool power_on) 403 { 404 uint32_t power_gate = power_on ? 0 : 1; 405 uint32_t pwr_status = power_on ? 0 : 2; 406 407 if (hws->ctx->dc->debug.disable_hubp_power_gate) 408 return; 409 if (REG(DOMAIN0_PG_CONFIG) == 0) 410 return; 411 412 switch (hubp_inst) { 413 case 0: /* DCHUBP0 */ 414 REG_UPDATE(DOMAIN0_PG_CONFIG, 415 DOMAIN0_POWER_GATE, power_gate); 416 417 REG_WAIT(DOMAIN0_PG_STATUS, 418 DOMAIN0_PGFSM_PWR_STATUS, pwr_status, 419 1, 1000); 420 break; 421 case 1: /* DCHUBP1 */ 422 REG_UPDATE(DOMAIN2_PG_CONFIG, 423 DOMAIN2_POWER_GATE, power_gate); 424 425 REG_WAIT(DOMAIN2_PG_STATUS, 426 DOMAIN2_PGFSM_PWR_STATUS, pwr_status, 427 1, 1000); 428 break; 429 case 2: /* DCHUBP2 */ 430 REG_UPDATE(DOMAIN4_PG_CONFIG, 431 DOMAIN4_POWER_GATE, power_gate); 432 433 REG_WAIT(DOMAIN4_PG_STATUS, 434 DOMAIN4_PGFSM_PWR_STATUS, pwr_status, 435 1, 1000); 436 break; 437 case 3: /* DCHUBP3 */ 438 REG_UPDATE(DOMAIN6_PG_CONFIG, 439 DOMAIN6_POWER_GATE, power_gate); 440 441 REG_WAIT(DOMAIN6_PG_STATUS, 442 DOMAIN6_PGFSM_PWR_STATUS, pwr_status, 443 1, 1000); 444 break; 445 case 4: /* DCHUBP4 */ 446 REG_UPDATE(DOMAIN8_PG_CONFIG, 447 DOMAIN8_POWER_GATE, power_gate); 448 449 REG_WAIT(DOMAIN8_PG_STATUS, 450 DOMAIN8_PGFSM_PWR_STATUS, pwr_status, 451 1, 1000); 452 break; 453 case 5: /* DCHUBP5 */ 454 /* 455 * Do not power gate DCHUB5, should be left at HW default, power on permanently. 456 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard 457 * reset. 458 * REG_UPDATE(DOMAIN10_PG_CONFIG, 459 * DOMAIN10_POWER_GATE, power_gate); 460 * 461 * REG_WAIT(DOMAIN10_PG_STATUS, 462 * DOMAIN10_PGFSM_PWR_STATUS, pwr_status, 463 * 1, 1000); 464 */ 465 break; 466 default: 467 BREAK_TO_DEBUGGER(); 468 break; 469 } 470 } 471 472 473 /* disable HW used by plane. 474 * note: cannot disable until disconnect is complete 475 */ 476 static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) 477 { 478 struct hubp *hubp = pipe_ctx->plane_res.hubp; 479 struct dpp *dpp = pipe_ctx->plane_res.dpp; 480 481 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); 482 483 /* In flip immediate with pipe splitting case GSL is used for 484 * synchronization so we must disable it when the plane is disabled. 485 */ 486 if (pipe_ctx->stream_res.gsl_group != 0) 487 dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); 488 489 dc->hwss.set_flip_control_gsl(pipe_ctx, false); 490 491 hubp->funcs->hubp_clk_cntl(hubp, false); 492 493 dpp->funcs->dpp_dppclk_control(dpp, false, false); 494 495 hubp->power_gated = true; 496 dc->optimized_required = false; /* We're powering off, no need to optimize */ 497 498 dc->hwss.plane_atomic_power_down(dc, 499 pipe_ctx->plane_res.dpp, 500 pipe_ctx->plane_res.hubp); 501 502 pipe_ctx->stream = NULL; 503 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); 504 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res)); 505 pipe_ctx->top_pipe = NULL; 506 pipe_ctx->bottom_pipe = NULL; 507 pipe_ctx->plane_state = NULL; 508 } 509 510 511 void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 512 { 513 DC_LOGGER_INIT(dc->ctx->logger); 514 515 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) 516 return; 517 518 dcn20_plane_atomic_disable(dc, pipe_ctx); 519 520 DC_LOG_DC("Power down front end %d\n", 521 pipe_ctx->pipe_idx); 522 } 523 524 enum dc_status dcn20_enable_stream_timing( 525 struct pipe_ctx *pipe_ctx, 526 struct dc_state *context, 527 struct dc *dc) 528 { 529 struct dc_stream_state *stream = pipe_ctx->stream; 530 struct drr_params params = {0}; 531 unsigned int event_triggers = 0; 532 struct pipe_ctx *odm_pipe; 533 int opp_cnt = 1; 534 int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst }; 535 536 /* by upper caller loop, pipe0 is parent pipe and be called first. 537 * back end is set up by for pipe0. Other children pipe share back end 538 * with pipe 0. No program is needed. 539 */ 540 if (pipe_ctx->top_pipe != NULL) 541 return DC_OK; 542 543 /* TODO check if timing_changed, disable stream if timing changed */ 544 545 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 546 opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; 547 opp_cnt++; 548 } 549 550 if (opp_cnt > 1) 551 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 552 pipe_ctx->stream_res.tg, 553 opp_inst, opp_cnt, 554 &pipe_ctx->stream->timing); 555 556 /* HW program guide assume display already disable 557 * by unplug sequence. OTG assume stop. 558 */ 559 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true); 560 561 if (false == pipe_ctx->clock_source->funcs->program_pix_clk( 562 pipe_ctx->clock_source, 563 &pipe_ctx->stream_res.pix_clk_params, 564 &pipe_ctx->pll_settings)) { 565 BREAK_TO_DEBUGGER(); 566 return DC_ERROR_UNEXPECTED; 567 } 568 569 pipe_ctx->stream_res.tg->funcs->program_timing( 570 pipe_ctx->stream_res.tg, 571 &stream->timing, 572 pipe_ctx->pipe_dlg_param.vready_offset, 573 pipe_ctx->pipe_dlg_param.vstartup_start, 574 pipe_ctx->pipe_dlg_param.vupdate_offset, 575 pipe_ctx->pipe_dlg_param.vupdate_width, 576 pipe_ctx->stream->signal, 577 true); 578 579 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 580 odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( 581 odm_pipe->stream_res.opp, 582 true); 583 584 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( 585 pipe_ctx->stream_res.opp, 586 true); 587 588 dc->hwss.blank_pixel_data(dc, pipe_ctx, true); 589 590 /* VTG is within DCHUB command block. DCFCLK is always on */ 591 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) { 592 BREAK_TO_DEBUGGER(); 593 return DC_ERROR_UNEXPECTED; 594 } 595 596 dcn20_hwss_wait_for_blank_complete(pipe_ctx->stream_res.opp); 597 598 params.vertical_total_min = stream->adjust.v_total_min; 599 params.vertical_total_max = stream->adjust.v_total_max; 600 params.vertical_total_mid = stream->adjust.v_total_mid; 601 params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num; 602 if (pipe_ctx->stream_res.tg->funcs->set_drr) 603 pipe_ctx->stream_res.tg->funcs->set_drr( 604 pipe_ctx->stream_res.tg, ¶ms); 605 606 // DRR should set trigger event to monitor surface update event 607 if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) 608 event_triggers = 0x80; 609 if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) 610 pipe_ctx->stream_res.tg->funcs->set_static_screen_control( 611 pipe_ctx->stream_res.tg, event_triggers); 612 613 /* TODO program crtc source select for non-virtual signal*/ 614 /* TODO program FMT */ 615 /* TODO setup link_enc */ 616 /* TODO set stream attributes */ 617 /* TODO program audio */ 618 /* TODO enable stream if timing changed */ 619 /* TODO unblank stream if DP */ 620 621 return DC_OK; 622 } 623 624 void dcn20_program_output_csc(struct dc *dc, 625 struct pipe_ctx *pipe_ctx, 626 enum dc_color_space colorspace, 627 uint16_t *matrix, 628 int opp_id) 629 { 630 struct mpc *mpc = dc->res_pool->mpc; 631 enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A; 632 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 633 634 if (mpc->funcs->power_on_mpc_mem_pwr) 635 mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); 636 637 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { 638 if (mpc->funcs->set_output_csc != NULL) 639 mpc->funcs->set_output_csc(mpc, 640 opp_id, 641 matrix, 642 ocsc_mode); 643 } else { 644 if (mpc->funcs->set_ocsc_default != NULL) 645 mpc->funcs->set_ocsc_default(mpc, 646 opp_id, 647 colorspace, 648 ocsc_mode); 649 } 650 } 651 652 bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, 653 const struct dc_stream_state *stream) 654 { 655 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 656 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 657 struct pwl_params *params = NULL; 658 /* 659 * program OGAM only for the top pipe 660 * if there is a pipe split then fix diagnostic is required: 661 * how to pass OGAM parameter for stream. 662 * if programming for all pipes is required then remove condition 663 * pipe_ctx->top_pipe == NULL ,but then fix the diagnostic. 664 */ 665 if (mpc->funcs->power_on_mpc_mem_pwr) 666 mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); 667 if (pipe_ctx->top_pipe == NULL 668 && mpc->funcs->set_output_gamma && stream->out_transfer_func) { 669 if (stream->out_transfer_func->type == TF_TYPE_HWPWL) 670 params = &stream->out_transfer_func->pwl; 671 else if (pipe_ctx->stream->out_transfer_func->type == 672 TF_TYPE_DISTRIBUTED_POINTS && 673 cm_helper_translate_curve_to_hw_format( 674 stream->out_transfer_func, 675 &mpc->blender_params, false)) 676 params = &mpc->blender_params; 677 /* 678 * there is no ROM 679 */ 680 if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) 681 BREAK_TO_DEBUGGER(); 682 } 683 /* 684 * if above if is not executed then 'params' equal to 0 and set in bypass 685 */ 686 mpc->funcs->set_output_gamma(mpc, mpcc_id, params); 687 688 return true; 689 } 690 691 bool dcn20_set_blend_lut( 692 struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) 693 { 694 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 695 bool result = true; 696 struct pwl_params *blend_lut = NULL; 697 698 if (plane_state->blend_tf) { 699 if (plane_state->blend_tf->type == TF_TYPE_HWPWL) 700 blend_lut = &plane_state->blend_tf->pwl; 701 else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { 702 cm_helper_translate_curve_to_hw_format( 703 plane_state->blend_tf, 704 &dpp_base->regamma_params, false); 705 blend_lut = &dpp_base->regamma_params; 706 } 707 } 708 result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); 709 710 return result; 711 } 712 713 bool dcn20_set_shaper_3dlut( 714 struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) 715 { 716 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 717 bool result = true; 718 struct pwl_params *shaper_lut = NULL; 719 720 if (plane_state->in_shaper_func) { 721 if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) 722 shaper_lut = &plane_state->in_shaper_func->pwl; 723 else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { 724 cm_helper_translate_curve_to_hw_format( 725 plane_state->in_shaper_func, 726 &dpp_base->shaper_params, true); 727 shaper_lut = &dpp_base->shaper_params; 728 } 729 } 730 731 result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); 732 if (plane_state->lut3d_func && 733 plane_state->lut3d_func->state.bits.initialized == 1) 734 result = dpp_base->funcs->dpp_program_3dlut(dpp_base, 735 &plane_state->lut3d_func->lut_3d); 736 else 737 result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); 738 739 if (plane_state->lut3d_func && 740 plane_state->lut3d_func->state.bits.initialized == 1 && 741 plane_state->lut3d_func->hdr_multiplier != 0) 742 dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 743 plane_state->lut3d_func->hdr_multiplier); 744 else 745 dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, 0x1f000); 746 747 return result; 748 } 749 750 bool dcn20_set_input_transfer_func(struct pipe_ctx *pipe_ctx, 751 const struct dc_plane_state *plane_state) 752 { 753 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 754 const struct dc_transfer_func *tf = NULL; 755 bool result = true; 756 bool use_degamma_ram = false; 757 758 if (dpp_base == NULL || plane_state == NULL) 759 return false; 760 761 dcn20_set_shaper_3dlut(pipe_ctx, plane_state); 762 dcn20_set_blend_lut(pipe_ctx, plane_state); 763 764 if (plane_state->in_transfer_func) 765 tf = plane_state->in_transfer_func; 766 767 768 if (tf == NULL) { 769 dpp_base->funcs->dpp_set_degamma(dpp_base, 770 IPP_DEGAMMA_MODE_BYPASS); 771 return true; 772 } 773 774 if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS) 775 use_degamma_ram = true; 776 777 if (use_degamma_ram == true) { 778 if (tf->type == TF_TYPE_HWPWL) 779 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, 780 &tf->pwl); 781 else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) { 782 cm_helper_translate_curve_to_degamma_hw_format(tf, 783 &dpp_base->degamma_params); 784 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, 785 &dpp_base->degamma_params); 786 } 787 return true; 788 } 789 /* handle here the optimized cases when de-gamma ROM could be used. 790 * 791 */ 792 if (tf->type == TF_TYPE_PREDEFINED) { 793 switch (tf->tf) { 794 case TRANSFER_FUNCTION_SRGB: 795 dpp_base->funcs->dpp_set_degamma(dpp_base, 796 IPP_DEGAMMA_MODE_HW_sRGB); 797 break; 798 case TRANSFER_FUNCTION_BT709: 799 dpp_base->funcs->dpp_set_degamma(dpp_base, 800 IPP_DEGAMMA_MODE_HW_xvYCC); 801 break; 802 case TRANSFER_FUNCTION_LINEAR: 803 dpp_base->funcs->dpp_set_degamma(dpp_base, 804 IPP_DEGAMMA_MODE_BYPASS); 805 break; 806 case TRANSFER_FUNCTION_PQ: 807 default: 808 result = false; 809 break; 810 } 811 } else if (tf->type == TF_TYPE_BYPASS) 812 dpp_base->funcs->dpp_set_degamma(dpp_base, 813 IPP_DEGAMMA_MODE_BYPASS); 814 else { 815 /* 816 * if we are here, we did not handle correctly. 817 * fix is required for this use case 818 */ 819 BREAK_TO_DEBUGGER(); 820 dpp_base->funcs->dpp_set_degamma(dpp_base, 821 IPP_DEGAMMA_MODE_BYPASS); 822 } 823 824 return result; 825 } 826 827 static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 828 { 829 struct pipe_ctx *odm_pipe; 830 int opp_cnt = 1; 831 int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst }; 832 833 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 834 opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; 835 opp_cnt++; 836 } 837 838 if (opp_cnt > 1) 839 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 840 pipe_ctx->stream_res.tg, 841 opp_inst, opp_cnt, 842 &pipe_ctx->stream->timing); 843 else 844 pipe_ctx->stream_res.tg->funcs->set_odm_bypass( 845 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 846 } 847 848 void dcn20_blank_pixel_data( 849 struct dc *dc, 850 struct pipe_ctx *pipe_ctx, 851 bool blank) 852 { 853 struct tg_color black_color = {0}; 854 struct stream_resource *stream_res = &pipe_ctx->stream_res; 855 struct dc_stream_state *stream = pipe_ctx->stream; 856 enum dc_color_space color_space = stream->output_color_space; 857 enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; 858 struct pipe_ctx *odm_pipe; 859 int odm_cnt = 1; 860 861 int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; 862 int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; 863 864 /* get opp dpg blank color */ 865 color_space_to_black_color(dc, color_space, &black_color); 866 867 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 868 odm_cnt++; 869 870 width = width / odm_cnt; 871 872 if (blank) { 873 if (stream_res->abm) 874 stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); 875 876 if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) 877 test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; 878 } else { 879 test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; 880 } 881 882 stream_res->opp->funcs->opp_set_disp_pattern_generator( 883 stream_res->opp, 884 test_pattern, 885 stream->timing.display_color_depth, 886 &black_color, 887 width, 888 height); 889 890 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 891 odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( 892 odm_pipe->stream_res.opp, 893 dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ? 894 CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, 895 stream->timing.display_color_depth, 896 &black_color, 897 width, 898 height); 899 } 900 901 if (!blank) 902 if (stream_res->abm) { 903 stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1); 904 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); 905 } 906 } 907 908 909 static void dcn20_power_on_plane( 910 struct dce_hwseq *hws, 911 struct pipe_ctx *pipe_ctx) 912 { 913 DC_LOGGER_INIT(hws->ctx->logger); 914 if (REG(DC_IP_REQUEST_CNTL)) { 915 REG_SET(DC_IP_REQUEST_CNTL, 0, 916 IP_REQUEST_EN, 1); 917 dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); 918 dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); 919 REG_SET(DC_IP_REQUEST_CNTL, 0, 920 IP_REQUEST_EN, 0); 921 DC_LOG_DEBUG( 922 "Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst); 923 } 924 } 925 926 void dcn20_enable_plane( 927 struct dc *dc, 928 struct pipe_ctx *pipe_ctx, 929 struct dc_state *context) 930 { 931 //if (dc->debug.sanity_checks) { 932 // dcn10_verify_allow_pstate_change_high(dc); 933 //} 934 dcn20_power_on_plane(dc->hwseq, pipe_ctx); 935 936 /* enable DCFCLK current DCHUB */ 937 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); 938 939 /* initialize HUBP on power up */ 940 pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); 941 942 /* make sure OPP_PIPE_CLOCK_EN = 1 */ 943 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( 944 pipe_ctx->stream_res.opp, 945 true); 946 947 /* TODO: enable/disable in dm as per update type. 948 if (plane_state) { 949 DC_LOG_DC(dc->ctx->logger, 950 "Pipe:%d 0x%x: addr hi:0x%x, " 951 "addr low:0x%x, " 952 "src: %d, %d, %d," 953 " %d; dst: %d, %d, %d, %d;\n", 954 pipe_ctx->pipe_idx, 955 plane_state, 956 plane_state->address.grph.addr.high_part, 957 plane_state->address.grph.addr.low_part, 958 plane_state->src_rect.x, 959 plane_state->src_rect.y, 960 plane_state->src_rect.width, 961 plane_state->src_rect.height, 962 plane_state->dst_rect.x, 963 plane_state->dst_rect.y, 964 plane_state->dst_rect.width, 965 plane_state->dst_rect.height); 966 967 DC_LOG_DC(dc->ctx->logger, 968 "Pipe %d: width, height, x, y format:%d\n" 969 "viewport:%d, %d, %d, %d\n" 970 "recout: %d, %d, %d, %d\n", 971 pipe_ctx->pipe_idx, 972 plane_state->format, 973 pipe_ctx->plane_res.scl_data.viewport.width, 974 pipe_ctx->plane_res.scl_data.viewport.height, 975 pipe_ctx->plane_res.scl_data.viewport.x, 976 pipe_ctx->plane_res.scl_data.viewport.y, 977 pipe_ctx->plane_res.scl_data.recout.width, 978 pipe_ctx->plane_res.scl_data.recout.height, 979 pipe_ctx->plane_res.scl_data.recout.x, 980 pipe_ctx->plane_res.scl_data.recout.y); 981 print_rq_dlg_ttu(dc, pipe_ctx); 982 } 983 */ 984 if (dc->vm_pa_config.valid) { 985 struct vm_system_aperture_param apt; 986 987 apt.sys_default.quad_part = 0; 988 989 apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; 990 apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; 991 992 // Program system aperture settings 993 pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); 994 } 995 996 // if (dc->debug.sanity_checks) { 997 // dcn10_verify_allow_pstate_change_high(dc); 998 // } 999 } 1000 1001 1002 void dcn20_pipe_control_lock_global( 1003 struct dc *dc, 1004 struct pipe_ctx *pipe, 1005 bool lock) 1006 { 1007 if (lock) { 1008 pipe->stream_res.tg->funcs->lock_doublebuffer_enable( 1009 pipe->stream_res.tg); 1010 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); 1011 } else { 1012 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); 1013 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, 1014 CRTC_STATE_VACTIVE); 1015 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, 1016 CRTC_STATE_VBLANK); 1017 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, 1018 CRTC_STATE_VACTIVE); 1019 pipe->stream_res.tg->funcs->lock_doublebuffer_disable( 1020 pipe->stream_res.tg); 1021 } 1022 } 1023 1024 void dcn20_pipe_control_lock( 1025 struct dc *dc, 1026 struct pipe_ctx *pipe, 1027 bool lock) 1028 { 1029 bool flip_immediate = false; 1030 1031 /* use TG master update lock to lock everything on the TG 1032 * therefore only top pipe need to lock 1033 */ 1034 if (pipe->top_pipe) 1035 return; 1036 1037 if (pipe->plane_state != NULL) 1038 flip_immediate = pipe->plane_state->flip_immediate; 1039 1040 /* In flip immediate and pipe splitting case, we need to use GSL 1041 * for synchronization. Only do setup on locking and on flip type change. 1042 */ 1043 if (lock && pipe->bottom_pipe != NULL) 1044 if ((flip_immediate && pipe->stream_res.gsl_group == 0) || 1045 (!flip_immediate && pipe->stream_res.gsl_group > 0)) 1046 dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate); 1047 1048 if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) { 1049 if (lock) 1050 pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg); 1051 else 1052 pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg); 1053 } else { 1054 if (lock) 1055 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); 1056 else 1057 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); 1058 } 1059 } 1060 1061 static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) 1062 { 1063 new_pipe->update_flags.raw = 0; 1064 1065 /* Exit on unchanged, unused pipe */ 1066 if (!old_pipe->plane_state && !new_pipe->plane_state) 1067 return; 1068 /* Detect pipe enable/disable */ 1069 if (!old_pipe->plane_state && new_pipe->plane_state) { 1070 new_pipe->update_flags.bits.enable = 1; 1071 new_pipe->update_flags.bits.mpcc = 1; 1072 new_pipe->update_flags.bits.dppclk = 1; 1073 new_pipe->update_flags.bits.hubp_interdependent = 1; 1074 new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; 1075 new_pipe->update_flags.bits.gamut_remap = 1; 1076 new_pipe->update_flags.bits.scaler = 1; 1077 new_pipe->update_flags.bits.viewport = 1; 1078 if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { 1079 new_pipe->update_flags.bits.odm = 1; 1080 new_pipe->update_flags.bits.global_sync = 1; 1081 } 1082 return; 1083 } 1084 if (old_pipe->plane_state && !new_pipe->plane_state) { 1085 new_pipe->update_flags.bits.disable = 1; 1086 return; 1087 } 1088 1089 /* Detect top pipe only changes */ 1090 if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { 1091 /* Detect odm changes */ 1092 if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe 1093 && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) 1094 || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) 1095 || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) 1096 || old_pipe->stream_res.opp != new_pipe->stream_res.opp) 1097 new_pipe->update_flags.bits.odm = 1; 1098 1099 /* Detect global sync changes */ 1100 if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset 1101 || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start 1102 || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset 1103 || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) 1104 new_pipe->update_flags.bits.global_sync = 1; 1105 } 1106 1107 /* 1108 * Detect opp / tg change, only set on change, not on enable 1109 * Assume mpcc inst = pipe index, if not this code needs to be updated 1110 * since mpcc is what is affected by these. In fact all of our sequence 1111 * makes this assumption at the moment with how hubp reset is matched to 1112 * same index mpcc reset. 1113 */ 1114 if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) 1115 new_pipe->update_flags.bits.opp_changed = 1; 1116 if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) 1117 new_pipe->update_flags.bits.tg_changed = 1; 1118 1119 /* Detect mpcc blending changes, only dpp inst and bot matter here */ 1120 if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp 1121 || old_pipe->stream_res.opp != new_pipe->stream_res.opp 1122 || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe) 1123 || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe) 1124 || (old_pipe->bottom_pipe && new_pipe->bottom_pipe 1125 && old_pipe->bottom_pipe->plane_res.mpcc_inst 1126 != new_pipe->bottom_pipe->plane_res.mpcc_inst)) 1127 new_pipe->update_flags.bits.mpcc = 1; 1128 1129 /* Detect dppclk change */ 1130 if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) 1131 new_pipe->update_flags.bits.dppclk = 1; 1132 1133 /* Check for scl update */ 1134 if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) 1135 new_pipe->update_flags.bits.scaler = 1; 1136 /* Check for vp update */ 1137 if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) 1138 || memcmp(&old_pipe->plane_res.scl_data.viewport_c, 1139 &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) 1140 new_pipe->update_flags.bits.viewport = 1; 1141 1142 /* Detect dlg/ttu/rq updates */ 1143 { 1144 struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; 1145 struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; 1146 struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; 1147 struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; 1148 1149 /* Detect pipe interdependent updates */ 1150 if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || 1151 old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || 1152 old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || 1153 old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || 1154 old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || 1155 old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || 1156 old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || 1157 old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || 1158 old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || 1159 old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || 1160 old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || 1161 old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || 1162 old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || 1163 old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || 1164 old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || 1165 old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || 1166 old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || 1167 old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { 1168 old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; 1169 old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; 1170 old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; 1171 old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; 1172 old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank; 1173 old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip; 1174 old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip; 1175 old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l; 1176 old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c; 1177 old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l; 1178 old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l; 1179 old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c; 1180 old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l; 1181 old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c; 1182 old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0; 1183 old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1; 1184 old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank; 1185 old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip; 1186 new_pipe->update_flags.bits.hubp_interdependent = 1; 1187 } 1188 /* Detect any other updates to ttu/rq/dlg */ 1189 if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) || 1190 memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) || 1191 memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) 1192 new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; 1193 } 1194 } 1195 1196 static void dcn20_update_dchubp_dpp( 1197 struct dc *dc, 1198 struct pipe_ctx *pipe_ctx, 1199 struct dc_state *context) 1200 { 1201 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1202 struct dpp *dpp = pipe_ctx->plane_res.dpp; 1203 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1204 1205 if (pipe_ctx->update_flags.bits.dppclk) 1206 dpp->funcs->dpp_dppclk_control(dpp, false, true); 1207 1208 /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG 1209 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP. 1210 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG 1211 */ 1212 if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { 1213 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); 1214 1215 hubp->funcs->hubp_setup( 1216 hubp, 1217 &pipe_ctx->dlg_regs, 1218 &pipe_ctx->ttu_regs, 1219 &pipe_ctx->rq_regs, 1220 &pipe_ctx->pipe_dlg_param); 1221 } 1222 if (pipe_ctx->update_flags.bits.hubp_interdependent) 1223 hubp->funcs->hubp_setup_interdependent( 1224 hubp, 1225 &pipe_ctx->dlg_regs, 1226 &pipe_ctx->ttu_regs); 1227 1228 if (pipe_ctx->update_flags.bits.enable || 1229 plane_state->update_flags.bits.bpp_change || 1230 plane_state->update_flags.bits.input_csc_change || 1231 plane_state->update_flags.bits.color_space_change || 1232 plane_state->update_flags.bits.coeff_reduction_change) { 1233 struct dc_bias_and_scale bns_params = {0}; 1234 1235 // program the input csc 1236 dpp->funcs->dpp_setup(dpp, 1237 plane_state->format, 1238 EXPANSION_MODE_ZERO, 1239 plane_state->input_csc_color_matrix, 1240 plane_state->color_space, 1241 NULL); 1242 1243 if (dpp->funcs->dpp_program_bias_and_scale) { 1244 //TODO :for CNVC set scale and bias registers if necessary 1245 dcn10_build_prescale_params(&bns_params, plane_state); 1246 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); 1247 } 1248 } 1249 1250 if (pipe_ctx->update_flags.bits.mpcc 1251 || plane_state->update_flags.bits.global_alpha_change 1252 || plane_state->update_flags.bits.per_pixel_alpha_change) { 1253 /* Need mpcc to be idle if changing opp */ 1254 if (pipe_ctx->update_flags.bits.opp_changed) { 1255 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; 1256 int mpcc_inst; 1257 1258 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { 1259 if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) 1260 continue; 1261 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); 1262 old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; 1263 } 1264 } 1265 dc->hwss.update_mpcc(dc, pipe_ctx); 1266 } 1267 1268 if (pipe_ctx->update_flags.bits.scaler || 1269 plane_state->update_flags.bits.scaling_change || 1270 plane_state->update_flags.bits.position_change || 1271 plane_state->update_flags.bits.per_pixel_alpha_change || 1272 pipe_ctx->stream->update_flags.bits.scaling) { 1273 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; 1274 ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); 1275 /* scaler configuration */ 1276 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( 1277 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); 1278 } 1279 1280 if (pipe_ctx->update_flags.bits.viewport || 1281 (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || 1282 (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) 1283 hubp->funcs->mem_program_viewport( 1284 hubp, 1285 &pipe_ctx->plane_res.scl_data.viewport, 1286 &pipe_ctx->plane_res.scl_data.viewport_c); 1287 1288 /* Any updates are handled in dc interface, just need to apply existing for plane enable */ 1289 if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) 1290 && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { 1291 dc->hwss.set_cursor_position(pipe_ctx); 1292 dc->hwss.set_cursor_attribute(pipe_ctx); 1293 1294 if (dc->hwss.set_cursor_sdr_white_level) 1295 dc->hwss.set_cursor_sdr_white_level(pipe_ctx); 1296 } 1297 1298 /* Any updates are handled in dc interface, just need 1299 * to apply existing for plane enable / opp change */ 1300 if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed 1301 || pipe_ctx->stream->update_flags.bits.gamut_remap 1302 || pipe_ctx->stream->update_flags.bits.out_csc) { 1303 /* dpp/cm gamut remap*/ 1304 dc->hwss.program_gamut_remap(pipe_ctx); 1305 1306 /*call the dcn2 method which uses mpc csc*/ 1307 dc->hwss.program_output_csc(dc, 1308 pipe_ctx, 1309 pipe_ctx->stream->output_color_space, 1310 pipe_ctx->stream->csc_color_matrix.matrix, 1311 hubp->opp_id); 1312 } 1313 1314 if (pipe_ctx->update_flags.bits.enable || 1315 pipe_ctx->update_flags.bits.opp_changed || 1316 plane_state->update_flags.bits.pixel_format_change || 1317 plane_state->update_flags.bits.horizontal_mirror_change || 1318 plane_state->update_flags.bits.rotation_change || 1319 plane_state->update_flags.bits.swizzle_change || 1320 plane_state->update_flags.bits.dcc_change || 1321 plane_state->update_flags.bits.bpp_change || 1322 plane_state->update_flags.bits.scaling_change || 1323 plane_state->update_flags.bits.plane_size_change) { 1324 struct plane_size size = plane_state->plane_size; 1325 1326 size.surface_size = pipe_ctx->plane_res.scl_data.viewport; 1327 hubp->funcs->hubp_program_surface_config( 1328 hubp, 1329 plane_state->format, 1330 &plane_state->tiling_info, 1331 &size, 1332 plane_state->rotation, 1333 &plane_state->dcc, 1334 plane_state->horizontal_mirror, 1335 0); 1336 hubp->power_gated = false; 1337 } 1338 1339 if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) 1340 dc->hwss.update_plane_addr(dc, pipe_ctx); 1341 1342 if (pipe_ctx->update_flags.bits.enable) 1343 hubp->funcs->set_blank(hubp, false); 1344 } 1345 1346 1347 static void dcn20_program_pipe( 1348 struct dc *dc, 1349 struct pipe_ctx *pipe_ctx, 1350 struct dc_state *context) 1351 { 1352 /* Only need to unblank on top pipe */ 1353 if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) 1354 && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) 1355 dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); 1356 1357 if (pipe_ctx->update_flags.bits.global_sync) { 1358 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1359 pipe_ctx->stream_res.tg, 1360 pipe_ctx->pipe_dlg_param.vready_offset, 1361 pipe_ctx->pipe_dlg_param.vstartup_start, 1362 pipe_ctx->pipe_dlg_param.vupdate_offset, 1363 pipe_ctx->pipe_dlg_param.vupdate_width); 1364 1365 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1366 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1367 1368 if (dc->hwss.setup_vupdate_interrupt) 1369 dc->hwss.setup_vupdate_interrupt(pipe_ctx); 1370 } 1371 1372 if (pipe_ctx->update_flags.bits.odm) 1373 dc->hwss.update_odm(dc, context, pipe_ctx); 1374 1375 if (pipe_ctx->update_flags.bits.enable) 1376 dcn20_enable_plane(dc, pipe_ctx, context); 1377 1378 if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) 1379 dcn20_update_dchubp_dpp(dc, pipe_ctx, context); 1380 1381 if (pipe_ctx->update_flags.bits.enable 1382 || pipe_ctx->plane_state->update_flags.bits.sdr_white_level) 1383 set_hdr_multiplier(pipe_ctx); 1384 1385 if (pipe_ctx->update_flags.bits.enable || 1386 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 1387 pipe_ctx->plane_state->update_flags.bits.gamma_change) 1388 dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); 1389 1390 /* dcn10_translate_regamma_to_hw_format takes 750us to finish 1391 * only do gamma programming for powering on, internal memcmp to avoid 1392 * updating on slave planes 1393 */ 1394 if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) 1395 dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); 1396 1397 /* If the pipe has been enabled or has a different opp, we 1398 * should reprogram the fmt. This deals with cases where 1399 * interation between mpc and odm combine on different streams 1400 * causes a different pipe to be chosen to odm combine with. 1401 */ 1402 if (pipe_ctx->update_flags.bits.enable 1403 || pipe_ctx->update_flags.bits.opp_changed) { 1404 1405 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 1406 pipe_ctx->stream_res.opp, 1407 COLOR_SPACE_YCBCR601, 1408 pipe_ctx->stream->timing.display_color_depth, 1409 pipe_ctx->stream->signal); 1410 1411 pipe_ctx->stream_res.opp->funcs->opp_program_fmt( 1412 pipe_ctx->stream_res.opp, 1413 &pipe_ctx->stream->bit_depth_params, 1414 &pipe_ctx->stream->clamping); 1415 } 1416 } 1417 1418 static bool does_pipe_need_lock(struct pipe_ctx *pipe) 1419 { 1420 if ((pipe->plane_state && pipe->plane_state->update_flags.raw) 1421 || pipe->update_flags.raw) 1422 return true; 1423 if (pipe->bottom_pipe) 1424 return does_pipe_need_lock(pipe->bottom_pipe); 1425 1426 return false; 1427 } 1428 1429 static void dcn20_program_front_end_for_ctx( 1430 struct dc *dc, 1431 struct dc_state *context) 1432 { 1433 const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; 1434 int i; 1435 bool pipe_locked[MAX_PIPES] = {false}; 1436 DC_LOGGER_INIT(dc->ctx->logger); 1437 1438 /* Carry over GSL groups in case the context is changing. */ 1439 for (i = 0; i < dc->res_pool->pipe_count; i++) 1440 if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) 1441 context->res_ctx.pipe_ctx[i].stream_res.gsl_group = 1442 dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; 1443 1444 /* Set pipe update flags and lock pipes */ 1445 for (i = 0; i < dc->res_pool->pipe_count; i++) 1446 dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], 1447 &context->res_ctx.pipe_ctx[i]); 1448 for (i = 0; i < dc->res_pool->pipe_count; i++) 1449 if (!context->res_ctx.pipe_ctx[i].top_pipe && 1450 does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { 1451 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1452 1453 if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) 1454 dc->hwss.pipe_control_lock(dc, pipe_ctx, true); 1455 if (!pipe_ctx->update_flags.bits.enable) 1456 dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); 1457 pipe_locked[i] = true; 1458 } 1459 1460 /* OTG blank before disabling all front ends */ 1461 for (i = 0; i < dc->res_pool->pipe_count; i++) 1462 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 1463 && !context->res_ctx.pipe_ctx[i].top_pipe 1464 && !context->res_ctx.pipe_ctx[i].prev_odm_pipe 1465 && context->res_ctx.pipe_ctx[i].stream) 1466 dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); 1467 1468 /* Disconnect mpcc */ 1469 for (i = 0; i < dc->res_pool->pipe_count; i++) 1470 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 1471 || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { 1472 dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 1473 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); 1474 } 1475 1476 /* 1477 * Program all updated pipes, order matters for mpcc setup. Start with 1478 * top pipe and program all pipes that follow in order 1479 */ 1480 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1481 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1482 1483 if (pipe->plane_state && !pipe->top_pipe) { 1484 while (pipe) { 1485 dcn20_program_pipe(dc, pipe, context); 1486 pipe = pipe->bottom_pipe; 1487 } 1488 /* Program secondary blending tree and writeback pipes */ 1489 pipe = &context->res_ctx.pipe_ctx[i]; 1490 if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 1491 && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) 1492 && dc->hwss.program_all_writeback_pipes_in_tree) 1493 dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); 1494 } 1495 } 1496 1497 /* Unlock all locked pipes */ 1498 for (i = 0; i < dc->res_pool->pipe_count; i++) 1499 if (pipe_locked[i]) { 1500 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1501 1502 if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) 1503 dc->hwss.pipe_control_lock(dc, pipe_ctx, false); 1504 if (!pipe_ctx->update_flags.bits.enable) 1505 dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); 1506 } 1507 1508 for (i = 0; i < dc->res_pool->pipe_count; i++) 1509 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) 1510 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 1511 1512 /* 1513 * If we are enabling a pipe, we need to wait for pending clear as this is a critical 1514 * part of the enable operation otherwise, DM may request an immediate flip which 1515 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which 1516 * is unsupported on DCN. 1517 */ 1518 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1519 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1520 1521 if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { 1522 struct hubp *hubp = pipe->plane_res.hubp; 1523 int j = 0; 1524 1525 for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS 1526 && hubp->funcs->hubp_is_flip_pending(hubp); j++) 1527 msleep(1); 1528 } 1529 } 1530 1531 /* WA to apply WM setting*/ 1532 if (dc->hwseq->wa.DEGVIDCN21) 1533 dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); 1534 } 1535 1536 1537 void dcn20_prepare_bandwidth( 1538 struct dc *dc, 1539 struct dc_state *context) 1540 { 1541 struct hubbub *hubbub = dc->res_pool->hubbub; 1542 1543 dc->clk_mgr->funcs->update_clocks( 1544 dc->clk_mgr, 1545 context, 1546 false); 1547 1548 /* program dchubbub watermarks */ 1549 hubbub->funcs->program_watermarks(hubbub, 1550 &context->bw_ctx.bw.dcn.watermarks, 1551 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1552 false); 1553 } 1554 1555 void dcn20_optimize_bandwidth( 1556 struct dc *dc, 1557 struct dc_state *context) 1558 { 1559 struct hubbub *hubbub = dc->res_pool->hubbub; 1560 1561 /* program dchubbub watermarks */ 1562 hubbub->funcs->program_watermarks(hubbub, 1563 &context->bw_ctx.bw.dcn.watermarks, 1564 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1565 true); 1566 1567 dc->clk_mgr->funcs->update_clocks( 1568 dc->clk_mgr, 1569 context, 1570 true); 1571 } 1572 1573 bool dcn20_update_bandwidth( 1574 struct dc *dc, 1575 struct dc_state *context) 1576 { 1577 int i; 1578 1579 /* recalculate DML parameters */ 1580 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) 1581 return false; 1582 1583 /* apply updated bandwidth parameters */ 1584 dc->hwss.prepare_bandwidth(dc, context); 1585 1586 /* update hubp configs for all pipes */ 1587 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1588 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1589 1590 if (pipe_ctx->plane_state == NULL) 1591 continue; 1592 1593 if (pipe_ctx->top_pipe == NULL) { 1594 bool blank = !is_pipe_tree_visible(pipe_ctx); 1595 1596 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1597 pipe_ctx->stream_res.tg, 1598 pipe_ctx->pipe_dlg_param.vready_offset, 1599 pipe_ctx->pipe_dlg_param.vstartup_start, 1600 pipe_ctx->pipe_dlg_param.vupdate_offset, 1601 pipe_ctx->pipe_dlg_param.vupdate_width); 1602 1603 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1604 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1605 1606 if (pipe_ctx->prev_odm_pipe == NULL) 1607 dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); 1608 1609 if (dc->hwss.setup_vupdate_interrupt) 1610 dc->hwss.setup_vupdate_interrupt(pipe_ctx); 1611 } 1612 1613 pipe_ctx->plane_res.hubp->funcs->hubp_setup( 1614 pipe_ctx->plane_res.hubp, 1615 &pipe_ctx->dlg_regs, 1616 &pipe_ctx->ttu_regs, 1617 &pipe_ctx->rq_regs, 1618 &pipe_ctx->pipe_dlg_param); 1619 } 1620 1621 return true; 1622 } 1623 1624 static void dcn20_enable_writeback( 1625 struct dc *dc, 1626 const struct dc_stream_status *stream_status, 1627 struct dc_writeback_info *wb_info, 1628 struct dc_state *context) 1629 { 1630 struct dwbc *dwb; 1631 struct mcif_wb *mcif_wb; 1632 struct timing_generator *optc; 1633 1634 ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES); 1635 ASSERT(wb_info->wb_enabled); 1636 dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 1637 mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; 1638 1639 /* set the OPTC source mux */ 1640 ASSERT(stream_status->primary_otg_inst < MAX_PIPES); 1641 optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst]; 1642 optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); 1643 /* set MCIF_WB buffer and arbitration configuration */ 1644 mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); 1645 mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); 1646 /* Enable MCIF_WB */ 1647 mcif_wb->funcs->enable_mcif(mcif_wb); 1648 /* Enable DWB */ 1649 dwb->funcs->enable(dwb, &wb_info->dwb_params); 1650 /* TODO: add sequence to enable/disable warmup */ 1651 } 1652 1653 void dcn20_disable_writeback( 1654 struct dc *dc, 1655 unsigned int dwb_pipe_inst) 1656 { 1657 struct dwbc *dwb; 1658 struct mcif_wb *mcif_wb; 1659 1660 ASSERT(dwb_pipe_inst < MAX_DWB_PIPES); 1661 dwb = dc->res_pool->dwbc[dwb_pipe_inst]; 1662 mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst]; 1663 1664 dwb->funcs->disable(dwb); 1665 mcif_wb->funcs->disable_mcif(mcif_wb); 1666 } 1667 1668 bool dcn20_hwss_wait_for_blank_complete( 1669 struct output_pixel_processor *opp) 1670 { 1671 int counter; 1672 1673 for (counter = 0; counter < 1000; counter++) { 1674 if (opp->funcs->dpg_is_blanked(opp)) 1675 break; 1676 1677 udelay(100); 1678 } 1679 1680 if (counter == 1000) { 1681 dm_error("DC: failed to blank crtc!\n"); 1682 return false; 1683 } 1684 1685 return true; 1686 } 1687 1688 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) 1689 { 1690 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1691 1692 if (!hubp) 1693 return false; 1694 return hubp->funcs->dmdata_status_done(hubp); 1695 } 1696 1697 static void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 1698 { 1699 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1700 struct dce_hwseq *hws = dc->hwseq; 1701 1702 if (pipe_ctx->stream_res.dsc) { 1703 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1704 1705 dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); 1706 while (odm_pipe) { 1707 dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); 1708 odm_pipe = odm_pipe->next_odm_pipe; 1709 } 1710 } 1711 #endif 1712 } 1713 1714 static void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 1715 { 1716 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1717 struct dce_hwseq *hws = dc->hwseq; 1718 1719 if (pipe_ctx->stream_res.dsc) { 1720 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1721 1722 dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false); 1723 while (odm_pipe) { 1724 dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); 1725 odm_pipe = odm_pipe->next_odm_pipe; 1726 } 1727 } 1728 #endif 1729 } 1730 1731 void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) 1732 { 1733 struct dc_dmdata_attributes attr = { 0 }; 1734 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1735 1736 attr.dmdata_mode = DMDATA_HW_MODE; 1737 attr.dmdata_size = 1738 dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36; 1739 attr.address.quad_part = 1740 pipe_ctx->stream->dmdata_address.quad_part; 1741 attr.dmdata_dl_delta = 0; 1742 attr.dmdata_qos_mode = 0; 1743 attr.dmdata_qos_level = 0; 1744 attr.dmdata_repeat = 1; /* always repeat */ 1745 attr.dmdata_updated = 1; 1746 attr.dmdata_sw_data = NULL; 1747 1748 hubp->funcs->dmdata_set_attributes(hubp, &attr); 1749 } 1750 1751 void dcn20_disable_stream(struct pipe_ctx *pipe_ctx) 1752 { 1753 dce110_disable_stream(pipe_ctx); 1754 } 1755 1756 static void dcn20_init_vm_ctx( 1757 struct dce_hwseq *hws, 1758 struct dc *dc, 1759 struct dc_virtual_addr_space_config *va_config, 1760 int vmid) 1761 { 1762 struct dcn_hubbub_virt_addr_config config; 1763 1764 if (vmid == 0) { 1765 ASSERT(0); /* VMID cannot be 0 for vm context */ 1766 return; 1767 } 1768 1769 config.page_table_start_addr = va_config->page_table_start_addr; 1770 config.page_table_end_addr = va_config->page_table_end_addr; 1771 config.page_table_block_size = va_config->page_table_block_size_in_bytes; 1772 config.page_table_depth = va_config->page_table_depth; 1773 config.page_table_base_addr = va_config->page_table_base_addr; 1774 1775 dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); 1776 } 1777 1778 static int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) 1779 { 1780 struct dcn_hubbub_phys_addr_config config; 1781 1782 config.system_aperture.fb_top = pa_config->system_aperture.fb_top; 1783 config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; 1784 config.system_aperture.fb_base = pa_config->system_aperture.fb_base; 1785 config.system_aperture.agp_top = pa_config->system_aperture.agp_top; 1786 config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; 1787 config.system_aperture.agp_base = pa_config->system_aperture.agp_base; 1788 config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; 1789 config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; 1790 config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; 1791 config.page_table_default_page_addr = pa_config->page_table_default_page_addr; 1792 1793 return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); 1794 } 1795 1796 static bool patch_address_for_sbs_tb_stereo( 1797 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr) 1798 { 1799 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1800 bool sec_split = pipe_ctx->top_pipe && 1801 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; 1802 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO && 1803 (pipe_ctx->stream->timing.timing_3d_format == 1804 TIMING_3D_FORMAT_SIDE_BY_SIDE || 1805 pipe_ctx->stream->timing.timing_3d_format == 1806 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) { 1807 *addr = plane_state->address.grph_stereo.left_addr; 1808 plane_state->address.grph_stereo.left_addr = 1809 plane_state->address.grph_stereo.right_addr; 1810 return true; 1811 } 1812 1813 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE && 1814 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) { 1815 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO; 1816 plane_state->address.grph_stereo.right_addr = 1817 plane_state->address.grph_stereo.left_addr; 1818 } 1819 return false; 1820 } 1821 1822 1823 static void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) 1824 { 1825 bool addr_patched = false; 1826 PHYSICAL_ADDRESS_LOC addr; 1827 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1828 1829 if (plane_state == NULL) 1830 return; 1831 1832 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); 1833 1834 // Call Helper to track VMID use 1835 vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst); 1836 1837 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( 1838 pipe_ctx->plane_res.hubp, 1839 &plane_state->address, 1840 plane_state->flip_immediate); 1841 1842 plane_state->status.requested_address = plane_state->address; 1843 1844 if (plane_state->flip_immediate) 1845 plane_state->status.current_address = plane_state->address; 1846 1847 if (addr_patched) 1848 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; 1849 } 1850 1851 void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, 1852 struct dc_link_settings *link_settings) 1853 { 1854 struct encoder_unblank_param params = { { 0 } }; 1855 struct dc_stream_state *stream = pipe_ctx->stream; 1856 struct dc_link *link = stream->link; 1857 struct pipe_ctx *odm_pipe; 1858 1859 params.opp_cnt = 1; 1860 for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 1861 params.opp_cnt++; 1862 } 1863 /* only 3 items below are used by unblank */ 1864 params.timing = pipe_ctx->stream->timing; 1865 1866 params.link_settings.link_rate = link_settings->link_rate; 1867 1868 if (dc_is_dp_signal(pipe_ctx->stream->signal)) { 1869 if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) 1870 params.timing.pix_clk_100hz /= 2; 1871 pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( 1872 pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); 1873 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); 1874 } 1875 1876 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1877 link->dc->hwss.edp_backlight_control(link, true); 1878 } 1879 } 1880 1881 void dcn20_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) 1882 { 1883 struct timing_generator *tg = pipe_ctx->stream_res.tg; 1884 int start_line = get_vupdate_offset_from_vsync(pipe_ctx); 1885 1886 if (start_line < 0) 1887 start_line = 0; 1888 1889 if (tg->funcs->setup_vertical_interrupt2) 1890 tg->funcs->setup_vertical_interrupt2(tg, start_line); 1891 } 1892 1893 static void dcn20_reset_back_end_for_pipe( 1894 struct dc *dc, 1895 struct pipe_ctx *pipe_ctx, 1896 struct dc_state *context) 1897 { 1898 int i; 1899 DC_LOGGER_INIT(dc->ctx->logger); 1900 if (pipe_ctx->stream_res.stream_enc == NULL) { 1901 pipe_ctx->stream = NULL; 1902 return; 1903 } 1904 1905 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 1906 /* DPMS may already disable */ 1907 if (!pipe_ctx->stream->dpms_off) 1908 core_link_disable_stream(pipe_ctx); 1909 else if (pipe_ctx->stream_res.audio) 1910 dc->hwss.disable_audio_stream(pipe_ctx); 1911 1912 /* free acquired resources */ 1913 if (pipe_ctx->stream_res.audio) { 1914 /*disable az_endpoint*/ 1915 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 1916 1917 /*free audio*/ 1918 if (dc->caps.dynamic_audio == true) { 1919 /*we have to dynamic arbitrate the audio endpoints*/ 1920 /*we free the resource, need reset is_audio_acquired*/ 1921 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, 1922 pipe_ctx->stream_res.audio, false); 1923 pipe_ctx->stream_res.audio = NULL; 1924 } 1925 } 1926 } 1927 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1928 else if (pipe_ctx->stream_res.dsc) { 1929 dp_set_dsc_enable(pipe_ctx, false); 1930 } 1931 #endif 1932 1933 /* by upper caller loop, parent pipe: pipe0, will be reset last. 1934 * back end share by all pipes and will be disable only when disable 1935 * parent pipe. 1936 */ 1937 if (pipe_ctx->top_pipe == NULL) { 1938 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg); 1939 1940 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false); 1941 if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass) 1942 pipe_ctx->stream_res.tg->funcs->set_odm_bypass( 1943 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1944 1945 if (pipe_ctx->stream_res.tg->funcs->set_drr) 1946 pipe_ctx->stream_res.tg->funcs->set_drr( 1947 pipe_ctx->stream_res.tg, NULL); 1948 } 1949 1950 for (i = 0; i < dc->res_pool->pipe_count; i++) 1951 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx) 1952 break; 1953 1954 if (i == dc->res_pool->pipe_count) 1955 return; 1956 1957 pipe_ctx->stream = NULL; 1958 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n", 1959 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); 1960 } 1961 1962 static void dcn20_reset_hw_ctx_wrap( 1963 struct dc *dc, 1964 struct dc_state *context) 1965 { 1966 int i; 1967 1968 /* Reset Back End*/ 1969 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) { 1970 struct pipe_ctx *pipe_ctx_old = 1971 &dc->current_state->res_ctx.pipe_ctx[i]; 1972 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1973 1974 if (!pipe_ctx_old->stream) 1975 continue; 1976 1977 if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) 1978 continue; 1979 1980 if (!pipe_ctx->stream || 1981 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { 1982 struct clock_source *old_clk = pipe_ctx_old->clock_source; 1983 1984 dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); 1985 if (dc->hwss.enable_stream_gating) 1986 dc->hwss.enable_stream_gating(dc, pipe_ctx); 1987 if (old_clk) 1988 old_clk->funcs->cs_power_down(old_clk); 1989 } 1990 } 1991 } 1992 1993 void dcn20_get_mpctree_visual_confirm_color( 1994 struct pipe_ctx *pipe_ctx, 1995 struct tg_color *color) 1996 { 1997 const struct tg_color pipe_colors[6] = { 1998 {MAX_TG_COLOR_VALUE, 0, 0}, // red 1999 {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow 2000 {0, MAX_TG_COLOR_VALUE, 0}, // blue 2001 {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple 2002 {0, 0, MAX_TG_COLOR_VALUE}, // green 2003 {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange 2004 }; 2005 2006 struct pipe_ctx *top_pipe = pipe_ctx; 2007 2008 while (top_pipe->top_pipe) { 2009 top_pipe = top_pipe->top_pipe; 2010 } 2011 2012 *color = pipe_colors[top_pipe->pipe_idx]; 2013 } 2014 2015 static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) 2016 { 2017 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2018 struct mpcc_blnd_cfg blnd_cfg = { {0} }; 2019 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; 2020 int mpcc_id; 2021 struct mpcc *new_mpcc; 2022 struct mpc *mpc = dc->res_pool->mpc; 2023 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); 2024 2025 // input to MPCC is always RGB, by default leave black_color at 0 2026 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { 2027 dcn10_get_hdr_visual_confirm_color( 2028 pipe_ctx, &blnd_cfg.black_color); 2029 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { 2030 dcn10_get_surface_visual_confirm_color( 2031 pipe_ctx, &blnd_cfg.black_color); 2032 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { 2033 dcn20_get_mpctree_visual_confirm_color( 2034 pipe_ctx, &blnd_cfg.black_color); 2035 } 2036 2037 if (per_pixel_alpha) 2038 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; 2039 else 2040 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; 2041 2042 blnd_cfg.overlap_only = false; 2043 blnd_cfg.global_gain = 0xff; 2044 2045 if (pipe_ctx->plane_state->global_alpha) 2046 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; 2047 else 2048 blnd_cfg.global_alpha = 0xff; 2049 2050 blnd_cfg.background_color_bpc = 4; 2051 blnd_cfg.bottom_gain_mode = 0; 2052 blnd_cfg.top_gain = 0x1f000; 2053 blnd_cfg.bottom_inside_gain = 0x1f000; 2054 blnd_cfg.bottom_outside_gain = 0x1f000; 2055 blnd_cfg.pre_multiplied_alpha = per_pixel_alpha; 2056 2057 /* 2058 * TODO: remove hack 2059 * Note: currently there is a bug in init_hw such that 2060 * on resume from hibernate, BIOS sets up MPCC0, and 2061 * we do mpcc_remove but the mpcc cannot go to idle 2062 * after remove. This cause us to pick mpcc1 here, 2063 * which causes a pstate hang for yet unknown reason. 2064 */ 2065 mpcc_id = hubp->inst; 2066 2067 /* If there is no full update, don't need to touch MPC tree*/ 2068 if (!pipe_ctx->plane_state->update_flags.bits.full_update) { 2069 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); 2070 return; 2071 } 2072 2073 /* check if this MPCC is already being used */ 2074 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); 2075 /* remove MPCC if being used */ 2076 if (new_mpcc != NULL) 2077 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc); 2078 else 2079 if (dc->debug.sanity_checks) 2080 mpc->funcs->assert_mpcc_idle_before_connect( 2081 dc->res_pool->mpc, mpcc_id); 2082 2083 /* Call MPC to insert new plane */ 2084 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, 2085 mpc_tree_params, 2086 &blnd_cfg, 2087 NULL, 2088 NULL, 2089 hubp->inst, 2090 mpcc_id); 2091 2092 ASSERT(new_mpcc != NULL); 2093 hubp->opp_id = pipe_ctx->stream_res.opp->inst; 2094 hubp->mpcc_id = mpcc_id; 2095 } 2096 2097 static int find_free_gsl_group(const struct dc *dc) 2098 { 2099 if (dc->res_pool->gsl_groups.gsl_0 == 0) 2100 return 1; 2101 if (dc->res_pool->gsl_groups.gsl_1 == 0) 2102 return 2; 2103 if (dc->res_pool->gsl_groups.gsl_2 == 0) 2104 return 3; 2105 2106 return 0; 2107 } 2108 2109 /* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock) 2110 * This is only used to lock pipes in pipe splitting case with immediate flip 2111 * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate, 2112 * so we get tearing with freesync since we cannot flip multiple pipes 2113 * atomically. 2114 * We use GSL for this: 2115 * - immediate flip: find first available GSL group if not already assigned 2116 * program gsl with that group, set current OTG as master 2117 * and always us 0x4 = AND of flip_ready from all pipes 2118 * - vsync flip: disable GSL if used 2119 * 2120 * Groups in stream_res are stored as +1 from HW registers, i.e. 2121 * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 2122 * Using a magic value like -1 would require tracking all inits/resets 2123 */ 2124 void dcn20_setup_gsl_group_as_lock( 2125 const struct dc *dc, 2126 struct pipe_ctx *pipe_ctx, 2127 bool enable) 2128 { 2129 struct gsl_params gsl; 2130 int group_idx; 2131 2132 memset(&gsl, 0, sizeof(struct gsl_params)); 2133 2134 if (enable) { 2135 /* return if group already assigned since GSL was set up 2136 * for vsync flip, we would unassign so it can't be "left over" 2137 */ 2138 if (pipe_ctx->stream_res.gsl_group > 0) 2139 return; 2140 2141 group_idx = find_free_gsl_group(dc); 2142 ASSERT(group_idx != 0); 2143 pipe_ctx->stream_res.gsl_group = group_idx; 2144 2145 /* set gsl group reg field and mark resource used */ 2146 switch (group_idx) { 2147 case 1: 2148 gsl.gsl0_en = 1; 2149 dc->res_pool->gsl_groups.gsl_0 = 1; 2150 break; 2151 case 2: 2152 gsl.gsl1_en = 1; 2153 dc->res_pool->gsl_groups.gsl_1 = 1; 2154 break; 2155 case 3: 2156 gsl.gsl2_en = 1; 2157 dc->res_pool->gsl_groups.gsl_2 = 1; 2158 break; 2159 default: 2160 BREAK_TO_DEBUGGER(); 2161 return; // invalid case 2162 } 2163 gsl.gsl_master_en = 1; 2164 } else { 2165 group_idx = pipe_ctx->stream_res.gsl_group; 2166 if (group_idx == 0) 2167 return; // if not in use, just return 2168 2169 pipe_ctx->stream_res.gsl_group = 0; 2170 2171 /* unset gsl group reg field and mark resource free */ 2172 switch (group_idx) { 2173 case 1: 2174 gsl.gsl0_en = 0; 2175 dc->res_pool->gsl_groups.gsl_0 = 0; 2176 break; 2177 case 2: 2178 gsl.gsl1_en = 0; 2179 dc->res_pool->gsl_groups.gsl_1 = 0; 2180 break; 2181 case 3: 2182 gsl.gsl2_en = 0; 2183 dc->res_pool->gsl_groups.gsl_2 = 0; 2184 break; 2185 default: 2186 BREAK_TO_DEBUGGER(); 2187 return; 2188 } 2189 gsl.gsl_master_en = 0; 2190 } 2191 2192 /* at this point we want to program whether it's to enable or disable */ 2193 if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && 2194 pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { 2195 pipe_ctx->stream_res.tg->funcs->set_gsl( 2196 pipe_ctx->stream_res.tg, 2197 &gsl); 2198 2199 pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( 2200 pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); 2201 } else 2202 BREAK_TO_DEBUGGER(); 2203 } 2204 2205 static void dcn20_set_flip_control_gsl( 2206 struct pipe_ctx *pipe_ctx, 2207 bool flip_immediate) 2208 { 2209 if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl) 2210 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl( 2211 pipe_ctx->plane_res.hubp, flip_immediate); 2212 2213 } 2214 2215 static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) 2216 { 2217 enum dc_lane_count lane_count = 2218 pipe_ctx->stream->link->cur_link_settings.lane_count; 2219 2220 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 2221 struct dc_link *link = pipe_ctx->stream->link; 2222 2223 uint32_t active_total_with_borders; 2224 uint32_t early_control = 0; 2225 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2226 2227 /* For MST, there are multiply stream go to only one link. 2228 * connect DIG back_end to front_end while enable_stream and 2229 * disconnect them during disable_stream 2230 * BY this, it is logic clean to separate stream and link 2231 */ 2232 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, 2233 pipe_ctx->stream_res.stream_enc->id, true); 2234 2235 if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { 2236 if (link->dc->hwss.program_dmdata_engine) 2237 link->dc->hwss.program_dmdata_engine(pipe_ctx); 2238 } 2239 2240 link->dc->hwss.update_info_frame(pipe_ctx); 2241 2242 /* enable early control to avoid corruption on DP monitor*/ 2243 active_total_with_borders = 2244 timing->h_addressable 2245 + timing->h_border_left 2246 + timing->h_border_right; 2247 2248 if (lane_count != 0) 2249 early_control = active_total_with_borders % lane_count; 2250 2251 if (early_control == 0) 2252 early_control = lane_count; 2253 2254 tg->funcs->set_early_control(tg, early_control); 2255 2256 /* enable audio only within mode set */ 2257 if (pipe_ctx->stream_res.audio != NULL) { 2258 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2259 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); 2260 } 2261 } 2262 2263 static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) 2264 { 2265 struct dc_stream_state *stream = pipe_ctx->stream; 2266 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2267 bool enable = false; 2268 struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 2269 enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal) 2270 ? dmdata_dp 2271 : dmdata_hdmi; 2272 2273 /* if using dynamic meta, don't set up generic infopackets */ 2274 if (pipe_ctx->stream->dmdata_address.quad_part != 0) { 2275 pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; 2276 enable = true; 2277 } 2278 2279 if (!hubp) 2280 return; 2281 2282 if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata) 2283 return; 2284 2285 stream_enc->funcs->set_dynamic_metadata(stream_enc, enable, 2286 hubp->inst, mode); 2287 } 2288 2289 static void dcn20_fpga_init_hw(struct dc *dc) 2290 { 2291 int i, j; 2292 struct dce_hwseq *hws = dc->hwseq; 2293 struct resource_pool *res_pool = dc->res_pool; 2294 struct dc_state *context = dc->current_state; 2295 2296 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) 2297 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); 2298 2299 // Initialize the dccg 2300 if (res_pool->dccg->funcs->dccg_init) 2301 res_pool->dccg->funcs->dccg_init(res_pool->dccg); 2302 2303 //Enable ability to power gate / don't force power on permanently 2304 dc->hwss.enable_power_gating_plane(hws, true); 2305 2306 // Specific to FPGA dccg and registers 2307 REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); 2308 REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); 2309 2310 dcn20_dccg_init(hws); 2311 2312 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); 2313 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); 2314 REG_WRITE(REFCLK_CNTL, 0); 2315 // 2316 2317 2318 /* Blank pixel data with OPP DPG */ 2319 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2320 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2321 2322 if (tg->funcs->is_tg_enabled(tg)) 2323 dcn20_init_blank(dc, tg); 2324 } 2325 2326 for (i = 0; i < res_pool->timing_generator_count; i++) { 2327 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2328 2329 if (tg->funcs->is_tg_enabled(tg)) 2330 tg->funcs->lock(tg); 2331 } 2332 2333 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2334 struct dpp *dpp = res_pool->dpps[i]; 2335 2336 dpp->funcs->dpp_reset(dpp); 2337 } 2338 2339 /* Reset all MPCC muxes */ 2340 res_pool->mpc->funcs->mpc_init(res_pool->mpc); 2341 2342 /* initialize OPP mpc_tree parameter */ 2343 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { 2344 res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst; 2345 res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 2346 for (j = 0; j < MAX_PIPES; j++) 2347 res_pool->opps[i]->mpcc_disconnect_pending[j] = false; 2348 } 2349 2350 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2351 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2352 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2353 struct hubp *hubp = dc->res_pool->hubps[i]; 2354 struct dpp *dpp = dc->res_pool->dpps[i]; 2355 2356 pipe_ctx->stream_res.tg = tg; 2357 pipe_ctx->pipe_idx = i; 2358 2359 pipe_ctx->plane_res.hubp = hubp; 2360 pipe_ctx->plane_res.dpp = dpp; 2361 pipe_ctx->plane_res.mpcc_inst = dpp->inst; 2362 hubp->mpcc_id = dpp->inst; 2363 hubp->opp_id = OPP_ID_INVALID; 2364 hubp->power_gated = false; 2365 pipe_ctx->stream_res.opp = NULL; 2366 2367 hubp->funcs->hubp_init(hubp); 2368 2369 //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; 2370 //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 2371 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; 2372 pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; 2373 /*to do*/ 2374 hwss1_plane_atomic_disconnect(dc, pipe_ctx); 2375 } 2376 2377 /* initialize DWB pointer to MCIF_WB */ 2378 for (i = 0; i < res_pool->res_cap->num_dwb; i++) 2379 res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i]; 2380 2381 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2382 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2383 2384 if (tg->funcs->is_tg_enabled(tg)) 2385 tg->funcs->unlock(tg); 2386 } 2387 2388 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2389 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2390 2391 dc->hwss.disable_plane(dc, pipe_ctx); 2392 2393 pipe_ctx->stream_res.tg = NULL; 2394 pipe_ctx->plane_res.hubp = NULL; 2395 } 2396 2397 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2398 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2399 2400 tg->funcs->tg_init(tg); 2401 } 2402 } 2403 2404 void dcn20_hw_sequencer_construct(struct dc *dc) 2405 { 2406 dcn10_hw_sequencer_construct(dc); 2407 dc->hwss.unblank_stream = dcn20_unblank_stream; 2408 dc->hwss.update_plane_addr = dcn20_update_plane_addr; 2409 dc->hwss.enable_stream_timing = dcn20_enable_stream_timing; 2410 dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; 2411 dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; 2412 dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; 2413 dc->hwss.apply_ctx_for_surface = NULL; 2414 dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; 2415 dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; 2416 dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; 2417 dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; 2418 dc->hwss.prepare_bandwidth = dcn20_prepare_bandwidth; 2419 dc->hwss.update_bandwidth = dcn20_update_bandwidth; 2420 dc->hwss.enable_writeback = dcn20_enable_writeback; 2421 dc->hwss.disable_writeback = dcn20_disable_writeback; 2422 dc->hwss.program_output_csc = dcn20_program_output_csc; 2423 dc->hwss.update_odm = dcn20_update_odm; 2424 dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; 2425 dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; 2426 dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; 2427 dc->hwss.enable_stream = dcn20_enable_stream; 2428 dc->hwss.disable_stream = dcn20_disable_stream; 2429 dc->hwss.init_sys_ctx = dcn20_init_sys_ctx; 2430 dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; 2431 dc->hwss.disable_stream_gating = dcn20_disable_stream_gating; 2432 dc->hwss.enable_stream_gating = dcn20_enable_stream_gating; 2433 dc->hwss.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt; 2434 dc->hwss.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap; 2435 dc->hwss.update_mpcc = dcn20_update_mpcc; 2436 dc->hwss.set_flip_control_gsl = dcn20_set_flip_control_gsl; 2437 dc->hwss.init_blank = dcn20_init_blank; 2438 dc->hwss.disable_plane = dcn20_disable_plane; 2439 dc->hwss.plane_atomic_disable = dcn20_plane_atomic_disable; 2440 dc->hwss.enable_power_gating_plane = dcn20_enable_power_gating_plane; 2441 dc->hwss.dpp_pg_control = dcn20_dpp_pg_control; 2442 dc->hwss.hubp_pg_control = dcn20_hubp_pg_control; 2443 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 2444 dc->hwss.dsc_pg_control = dcn20_dsc_pg_control; 2445 #else 2446 dc->hwss.dsc_pg_control = NULL; 2447 #endif 2448 dc->hwss.disable_vga = dcn20_disable_vga; 2449 2450 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 2451 dc->hwss.init_hw = dcn20_fpga_init_hw; 2452 dc->hwss.init_pipes = NULL; 2453 } 2454 2455 2456 } 2457