/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <linux/delay.h>

#include "dm_services.h"
#include "basics/dc_common.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dcn20_resource.h"
#include "dcn20_hwseq.h"
#include "dce/dce_hwseq.h"
#include "dcn20_dsc.h"
#include "dcn20_optc.h"
#include "abm.h"
#include "clk_mgr.h"
#include "dmcu.h"
#include "hubp.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
#include "inc/link_dpcd.h"
#include "dpcd_defs.h"

#define DC_LOGGER_INIT(logger)

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name

static int find_free_gsl_group(const struct dc *dc)
{
	if (dc->res_pool->gsl_groups.gsl_0 == 0)
		return 1;
	if (dc->res_pool->gsl_groups.gsl_1 == 0)
		return 2;
	if (dc->res_pool->gsl_groups.gsl_2 == 0)
		return 3;

	return 0;
}

/* NOTE: This is not a generic setup_gsl function (hence the suffix as_lock)
 * This is only used to lock pipes in pipe splitting case with immediate flip
 * Ordinary MPC/OTG locks suppress VUPDATE which doesn't help with immediate,
 * so we get tearing with freesync since we cannot flip multiple pipes
 * atomically.
 * We use GSL for this:
 * - immediate flip: find first available GSL group if not already assigned
 *                   program gsl with that group, set current OTG as master
 *                   and always use 0x4 = AND of flip_ready from all pipes
 * - vsync flip: disable GSL if used
 *
 * Groups in stream_res are stored as +1 from HW registers, i.e.
 * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
 * Using a magic value like -1 would require tracking all inits/resets
 */
static void dcn20_setup_gsl_group_as_lock(
		const struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool enable)
{
	struct gsl_params gsl;
	int group_idx;

	memset(&gsl, 0, sizeof(struct gsl_params));

	if (enable) {
		/* return if group already assigned since GSL was set up
		 * for vsync flip, we would unassign so it can't be "left over"
		 */
		if (pipe_ctx->stream_res.gsl_group > 0)
			return;

		group_idx = find_free_gsl_group(dc);
		ASSERT(group_idx != 0);
		pipe_ctx->stream_res.gsl_group = group_idx;

		/* set gsl group reg field and mark resource used */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 1;
			dc->res_pool->gsl_groups.gsl_0 = 1;
			break;
		case 2:
			gsl.gsl1_en = 1;
			dc->res_pool->gsl_groups.gsl_1 = 1;
			break;
		case 3:
			gsl.gsl2_en = 1;
			dc->res_pool->gsl_groups.gsl_2 = 1;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return; // invalid case
		}
		gsl.gsl_master_en = 1;
	} else {
		group_idx = pipe_ctx->stream_res.gsl_group;
		if (group_idx == 0)
			return; // if not in use, just return

		pipe_ctx->stream_res.gsl_group = 0;

		/* unset gsl group reg field and mark resource free */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 0;
			dc->res_pool->gsl_groups.gsl_0 = 0;
			break;
		case 2:
			gsl.gsl1_en = 0;
			dc->res_pool->gsl_groups.gsl_1 = 0;
			break;
		case 3:
			gsl.gsl2_en = 0;
			dc->res_pool->gsl_groups.gsl_2 = 0;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return;
		}
		gsl.gsl_master_en = 0;
	}

	/* at this point we want to program whether it's to enable or disable */
	if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL &&
		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) {
		pipe_ctx->stream_res.tg->funcs->set_gsl(
			pipe_ctx->stream_res.tg,
			&gsl);

		pipe_ctx->stream_res.tg->funcs->set_gsl_source_select(
			pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
	} else
		BREAK_TO_DEBUGGER();
}

void dcn20_set_flip_control_gsl(
		struct pipe_ctx *pipe_ctx,
		bool flip_immediate)
{
	if (pipe_ctx && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_control_surface_gsl(
				pipe_ctx->plane_res.hubp, flip_immediate);

}

void dcn20_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = true; /* disable power gating */

	if (enable)
		force_on = false;

	/* DCHUBP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
	if (REG(DOMAIN8_PG_CONFIG))
		REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);
	if (REG(DOMAIN10_PG_CONFIG))
		REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on);

	/* DPP0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
	if (REG(DOMAIN9_PG_CONFIG))
		REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);
	if (REG(DOMAIN11_PG_CONFIG))
		REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on);

	/* DCS0/1/2/3/4/5 */
	REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on);
	if (REG(DOMAIN19_PG_CONFIG))
		REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on);
	if (REG(DOMAIN20_PG_CONFIG))
		REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on);
	if (REG(DOMAIN21_PG_CONFIG))
		REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on);
}

void dcn20_dccg_init(struct dce_hwseq *hws)
{
	/*
	 * set MICROSECOND_TIME_BASE_DIV
	 * 100Mhz refclk -> 0x120264
	 * 27Mhz refclk -> 0x12021b
	 * 48Mhz refclk -> 0x120230
	 *
	 */
	REG_WRITE(MICROSECOND_TIME_BASE_DIV, 0x120264);

	/*
	 * set MILLISECOND_TIME_BASE_DIV
	 * 100Mhz refclk -> 0x1186a0
	 * 27Mhz refclk -> 0x106978
	 * 48Mhz refclk -> 0x10bb80
	 *
	 */
	REG_WRITE(MILLISECOND_TIME_BASE_DIV, 0x1186a0);

	/* This value is dependent on the hardware pipeline delay so set once per SOC */
	REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0xe01003c);
}

void dcn20_disable_vga(
	struct dce_hwseq *hws)
{
	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);
	REG_WRITE(D5VGA_CONTROL, 0);
	REG_WRITE(D6VGA_CONTROL, 0);
}

void dcn20_program_triple_buffer(
	const struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	bool enable_triple_buffer)
{
	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) {
		pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer(
			pipe_ctx->plane_res.hubp,
			enable_triple_buffer);
	}
}

/* Blank pixel data during initialization */
void dcn20_init_blank(
		struct dc *dc,
		struct timing_generator *tg)
{
	struct dce_hwseq *hws = dc->hwseq;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct output_pixel_processor *opp = NULL;
	struct output_pixel_processor *bottom_opp = NULL;
	uint32_t num_opps, opp_id_src0, opp_id_src1;
	uint32_t otg_active_width, otg_active_height;

	/* program opp dpg blank color */
	color_space = COLOR_SPACE_SRGB;
	color_space_to_black_color(dc, color_space, &black_color);

	/* get the OTG active size */
	tg->funcs->get_otg_active_size(tg,
			&otg_active_width,
			&otg_active_height);

	/* get the OPTC source */
	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);

	if (opp_id_src0 >= dc->res_pool->res_cap->num_opp) {
		ASSERT(false);
		return;
	}
	opp = dc->res_pool->opps[opp_id_src0];

	if (num_opps == 2) {
		otg_active_width = otg_active_width / 2;

		if (opp_id_src1 >= dc->res_pool->res_cap->num_opp) {
			ASSERT(false);
			return;
		}
		bottom_opp = dc->res_pool->opps[opp_id_src1];
	}

	opp->funcs->opp_set_disp_pattern_generator(
			opp,
			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
			CONTROLLER_DP_COLOR_SPACE_UDEFINED,
			COLOR_DEPTH_UNDEFINED,
			&black_color,
			otg_active_width,
			otg_active_height,
			0);

	if (num_opps == 2) {
		bottom_opp->funcs->opp_set_disp_pattern_generator(
				bottom_opp,
				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
				COLOR_DEPTH_UNDEFINED,
				&black_color,
				otg_active_width,
				otg_active_height,
				0);
	}

	hws->funcs.wait_for_blank_complete(opp);
}

void dcn20_dsc_pg_control(
		struct dce_hwseq *hws,
		unsigned int dsc_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;
	uint32_t org_ip_request_cntl = 0;

	if (hws->ctx->dc->debug.disable_dsc_power_gate)
		return;

	if (REG(DOMAIN16_PG_CONFIG) == 0)
		return;

	REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);

	switch (dsc_inst) {
	case 0: /* DSC0 */
		REG_UPDATE(DOMAIN16_PG_CONFIG,
				DOMAIN16_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN16_PG_STATUS,
				DOMAIN16_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DSC1 */
		REG_UPDATE(DOMAIN17_PG_CONFIG,
				DOMAIN17_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN17_PG_STATUS,
				DOMAIN17_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DSC2 */
		REG_UPDATE(DOMAIN18_PG_CONFIG,
				DOMAIN18_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN18_PG_STATUS,
				DOMAIN18_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DSC3 */
		REG_UPDATE(DOMAIN19_PG_CONFIG,
				DOMAIN19_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN19_PG_STATUS,
				DOMAIN19_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DSC4 */
		REG_UPDATE(DOMAIN20_PG_CONFIG,
				DOMAIN20_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN20_PG_STATUS,
				DOMAIN20_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DSC5 */
		REG_UPDATE(DOMAIN21_PG_CONFIG,
				DOMAIN21_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN21_PG_STATUS,
				DOMAIN21_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	if (org_ip_request_cntl == 0)
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}

void dcn20_dpp_pg_control(
		struct dce_hwseq *hws,
		unsigned int dpp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DPP4 */
		REG_UPDATE(DOMAIN9_PG_CONFIG,
				DOMAIN9_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN9_PG_STATUS,
				DOMAIN9_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DPP5 */
		/*
		 * Do not power gate DPP5, should be left at HW default, power on permanently.
		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
		 * reset.
		 * REG_UPDATE(DOMAIN11_PG_CONFIG,
		 *		DOMAIN11_POWER_GATE, power_gate);
		 *
		 * REG_WAIT(DOMAIN11_PG_STATUS,
		 *		DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
		 *		1, 1000);
		 */
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}


void dcn20_hubp_pg_control(
		struct dce_hwseq *hws,
		unsigned int hubp_inst,
		bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? 0 : 2;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 4: /* DCHUBP4 */
		REG_UPDATE(DOMAIN8_PG_CONFIG,
				DOMAIN8_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN8_PG_STATUS,
				DOMAIN8_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 5: /* DCHUBP5 */
		/*
		 * Do not power gate DCHUB5, should be left at HW default, power on permanently.
		 * PG on Pipe5 is De-featured, attempting to put it to PG state may result in hard
		 * reset.
		 * REG_UPDATE(DOMAIN10_PG_CONFIG,
		 *		DOMAIN10_POWER_GATE, power_gate);
		 *
		 * REG_WAIT(DOMAIN10_PG_STATUS,
		 *		DOMAIN10_PGFSM_PWR_STATUS, pwr_status,
		 *		1, 1000);
		 */
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}


/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);

	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	hubp->power_gated = true;

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}


void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	dcn20_plane_atomic_disable(dc, pipe_ctx);

	DC_LOG_DC("Power down front end %d\n",
			pipe_ctx->pipe_idx);
}

static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
		int opp_cnt)
{
	bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
	int flow_ctrl_cnt;

	if (opp_cnt >= 2)
		hblank_halved = true;

	flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
			stream->timing.h_border_left -
			stream->timing.h_border_right;

	if (hblank_halved)
		flow_ctrl_cnt /= 2;

	/* ODM combine 4:1 case */
	if (opp_cnt == 4)
		flow_ctrl_cnt /= 2;

	return flow_ctrl_cnt;
}

enum dc_status dcn20_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };
	bool interlace = stream->timing.flags.INTERLACE;
	int i;
	struct mpc_dwb_flow_control flow_control;
	struct mpc *mpc = dc->res_pool->mpc;
	bool rate_control_2x_pclk = (interlace || optc2_is_two_pixels_per_containter(&stream->timing));

	/* By the upper caller loop, pipe 0 is the parent pipe and is called first.
	 * The back end is set up for pipe 0; other child pipes share the back end
	 * with pipe 0, so no programming is needed for them.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
		opp_cnt++;
	}

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);

	/* The HW programming guide assumes the display was already disabled
	 * by the unplug sequence, so the OTG is assumed to be stopped.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			pipe_ctx->pipe_dlg_param.vready_offset,
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

	rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
	flow_control.flow_ctrl_mode = 0;
	flow_control.flow_ctrl_cnt0 = 0x80;
	flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(stream, opp_cnt);
	if (mpc->funcs->set_out_rate_control) {
		for (i = 0; i < opp_cnt; ++i) {
			mpc->funcs->set_out_rate_control(
					mpc, opp_inst[i],
					true,
					rate_control_2x_pclk,
					&flow_control);
		}
	}

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);

	params.vertical_total_min = stream->adjust.v_total_min;
	params.vertical_total_max = stream->adjust.v_total_max;
	params.vertical_total_mid = stream->adjust.v_total_mid;
	params.vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;
	if (pipe_ctx->stream_res.tg->funcs->set_drr)
		pipe_ctx->stream_res.tg->funcs->set_drr(
			pipe_ctx->stream_res.tg, &params);

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		event_triggers = 0x80;
	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}

void dcn20_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct mpc *mpc = dc->res_pool->mpc;
	enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;

	if (mpc->funcs->power_on_mpc_mem_pwr)
		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
		if (mpc->funcs->set_output_csc != NULL)
			mpc->funcs->set_output_csc(mpc,
					opp_id,
					matrix,
					ocsc_mode);
	} else {
		if (mpc->funcs->set_ocsc_default != NULL)
			mpc->funcs->set_ocsc_default(mpc,
					opp_id,
					colorspace,
					ocsc_mode);
	}
}

bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	struct pwl_params *params = NULL;
	/*
	 * Program OGAM only for the top pipe. If there is a pipe split, a fix to
	 * the diagnostics is required: how to pass the OGAM parameters per stream.
	 * If programming for all pipes is required, remove the condition
	 * pipe_ctx->top_pipe == NULL, but then fix the diagnostics.
	 */
	if (mpc->funcs->power_on_mpc_mem_pwr)
		mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true);
	if (pipe_ctx->top_pipe == NULL
			&& mpc->funcs->set_output_gamma && stream->out_transfer_func) {
		if (stream->out_transfer_func->type == TF_TYPE_HWPWL)
			params = &stream->out_transfer_func->pwl;
		else if (pipe_ctx->stream->out_transfer_func->type ==
				TF_TYPE_DISTRIBUTED_POINTS &&
				cm_helper_translate_curve_to_hw_format(
					stream->out_transfer_func,
					&mpc->blender_params, false))
			params = &mpc->blender_params;
		/*
		 * there is no ROM
		 */
		if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
			BREAK_TO_DEBUGGER();
	}
	/*
	 * if above if is not executed then 'params' equal to 0 and set in bypass
	 */
	mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return true;
}

bool dcn20_set_blend_lut(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	bool result = true;
	struct pwl_params *blend_lut = NULL;

	if (plane_state->blend_tf) {
		if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
			blend_lut = &plane_state->blend_tf->pwl;
		else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_hw_format(
					plane_state->blend_tf,
					&dpp_base->regamma_params, false);
			blend_lut = &dpp_base->regamma_params;
		}
	}
	result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut);

	return result;
}

bool dcn20_set_shaper_3dlut(
	struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	bool result = true;
	struct pwl_params *shaper_lut = NULL;

	if (plane_state->in_shaper_func) {
		if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL)
			shaper_lut = &plane_state->in_shaper_func->pwl;
		else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_hw_format(
					plane_state->in_shaper_func,
					&dpp_base->shaper_params, true);
			shaper_lut = &dpp_base->shaper_params;
		}
	}

	result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut);
	if (plane_state->lut3d_func &&
			plane_state->lut3d_func->state.bits.initialized == 1)
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base,
				&plane_state->lut3d_func->lut_3d);
	else
		result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL);

	return result;
}

bool dcn20_set_input_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;
	bool use_degamma_ram = false;

	if (dpp_base == NULL || plane_state == NULL)
		return false;

	hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state);
	hws->funcs.set_blend_lut(pipe_ctx, plane_state);

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;


	if (tf == NULL) {
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
		return true;
	}

	if (tf->type == TF_TYPE_HWPWL || tf->type == TF_TYPE_DISTRIBUTED_POINTS)
		use_degamma_ram = true;

	if (use_degamma_ram == true) {
		if (tf->type == TF_TYPE_HWPWL)
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&tf->pwl);
		else if (tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
			cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
					&dpp_base->degamma_params);
		}
		return true;
	}
	/* handle here the optimized cases when de-gamma ROM could be used.
	 *
	 */
	if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base,
					IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS)
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	else {
		/*
		 * if we are here, we did not handle correctly.
		 * fix is required for this use case
		 */
		BREAK_TO_DEBUGGER();
		dpp_base->funcs->dpp_set_degamma(dpp_base,
				IPP_DEGAMMA_MODE_BYPASS);
	}

	return result;
}

void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst };

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst;
		opp_cnt++;
	}

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				&pipe_ctx->stream->timing);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
}

void dcn20_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space = stream->output_color_space;
	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
	struct pipe_ctx *odm_pipe;
	int odm_cnt = 1;

	int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
	int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;

	if (stream->link->test_pattern_enabled)
		return;

	/* get opp dpg blank color */
	color_space_to_black_color(dc, color_space, &black_color);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		odm_cnt++;

	width = width / odm_cnt;

	if (blank) {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
		}
	} else {
		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
	}

	dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			test_pattern,
			test_pattern_color_space,
			stream->timing.display_color_depth,
			&black_color,
			width,
			height,
			0);

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		dc->hwss.set_disp_pattern_generator(dc,
				odm_pipe,
				dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE && blank ?
						CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern,
				test_pattern_color_space,
				stream->timing.display_color_depth,
				&black_color,
				width,
				height,
				0);
	}

	if (!blank)
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
}


static void dcn20_power_on_plane(
	struct dce_hwseq *hws,
	struct pipe_ctx *pipe_ctx)
{
	DC_LOGGER_INIT(hws->ctx->logger);
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", pipe_ctx->plane_res.hubp->inst);
	}
}

void dcn20_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	//if (dc->debug.sanity_checks) {
	//	dcn10_verify_allow_pstate_change_high(dc);
	//}
	dcn20_power_on_plane(dc->hwseq, pipe_ctx);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* TODO: enable/disable in dm as per update type.
	if (plane_state) {
		DC_LOG_DC(dc->ctx->logger,
				"Pipe:%d 0x%x: addr hi:0x%x, "
				"addr low:0x%x, "
				"src: %d, %d, %d,"
				" %d; dst: %d, %d, %d, %d;\n",
				pipe_ctx->pipe_idx,
				plane_state,
				plane_state->address.grph.addr.high_part,
				plane_state->address.grph.addr.low_part,
				plane_state->src_rect.x,
				plane_state->src_rect.y,
				plane_state->src_rect.width,
				plane_state->src_rect.height,
				plane_state->dst_rect.x,
				plane_state->dst_rect.y,
				plane_state->dst_rect.width,
				plane_state->dst_rect.height);

		DC_LOG_DC(dc->ctx->logger,
				"Pipe %d: width, height, x, y format:%d\n"
				"viewport:%d, %d, %d, %d\n"
				"recout: %d, %d, %d, %d\n",
				pipe_ctx->pipe_idx,
				plane_state->format,
				pipe_ctx->plane_res.scl_data.viewport.width,
				pipe_ctx->plane_res.scl_data.viewport.height,
				pipe_ctx->plane_res.scl_data.viewport.x,
				pipe_ctx->plane_res.scl_data.viewport.y,
				pipe_ctx->plane_res.scl_data.recout.width,
				pipe_ctx->plane_res.scl_data.recout.height,
				pipe_ctx->plane_res.scl_data.recout.x,
				pipe_ctx->plane_res.scl_data.recout.y);
		print_rq_dlg_ttu(dc, pipe_ctx);
	}
	*/
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

//	if (dc->debug.sanity_checks) {
//		dcn10_verify_allow_pstate_change_high(dc);
//	}
}

void dcn20_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct pipe_ctx *temp_pipe;
	bool flip_immediate = false;

	/* Use the TG master update lock to lock everything on the TG;
	 * therefore only the top pipe needs to lock.
	 */
	if (!pipe || pipe->top_pipe)
		return;

	if (pipe->plane_state != NULL)
		flip_immediate = pipe->plane_state->flip_immediate;

	if (pipe->stream_res.gsl_group > 0) {
		temp_pipe = pipe->bottom_pipe;
		while (!flip_immediate && temp_pipe) {
			if (temp_pipe->plane_state != NULL)
				flip_immediate = temp_pipe->plane_state->flip_immediate;
			temp_pipe = temp_pipe->bottom_pipe;
		}
	}

	if (flip_immediate && lock) {
		const int TIMEOUT_FOR_FLIP_PENDING = 100000;
		int i;

		temp_pipe = pipe;
		while (temp_pipe) {
			if (temp_pipe->plane_state && temp_pipe->plane_state->flip_immediate) {
				for (i = 0; i < TIMEOUT_FOR_FLIP_PENDING; ++i) {
					if (!temp_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(temp_pipe->plane_res.hubp))
						break;
					udelay(1);
				}

				/* no reason it should take this long for immediate flips */
				ASSERT(i != TIMEOUT_FOR_FLIP_PENDING);
			}
			temp_pipe = temp_pipe->bottom_pipe;
		}
	}

	/* In flip immediate and pipe splitting case, we need to use GSL
	 * for synchronization. Only do setup on locking and on flip type change.
	 */
	if (lock && (pipe->bottom_pipe != NULL || !flip_immediate))
		if ((flip_immediate && pipe->stream_res.gsl_group == 0) ||
		    (!flip_immediate && pipe->stream_res.gsl_group > 0))
			dcn20_setup_gsl_group_as_lock(dc, pipe, flip_immediate);

	if (pipe->plane_state != NULL)
		flip_immediate = pipe->plane_state->flip_immediate;

	temp_pipe = pipe->bottom_pipe;
	while (flip_immediate && temp_pipe) {
		if (temp_pipe->plane_state != NULL)
			flip_immediate = temp_pipe->plane_state->flip_immediate;
		temp_pipe = temp_pipe->bottom_pipe;
	}

	if (!lock && pipe->stream_res.gsl_group > 0 && pipe->plane_state &&
			!flip_immediate)
		dcn20_setup_gsl_group_as_lock(dc, pipe, false);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_pipe = 1;
		inst_flags.otg_inst = pipe->stream_res.tg->inst;

		if (pipe->plane_state != NULL)
			hw_locks.bits.triple_buffer_lock = pipe->plane_state->triplebuffer_flips;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
	} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
		if (lock)
			pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
		else
			pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
	} else {
		if (lock)
			pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
		else
			pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
	}
}

static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
{
	new_pipe->update_flags.raw = 0;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}
	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state) {
		new_pipe->update_flags.bits.plane_changed = true;
	}

	/* Detect top pipe only changes */
	if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
		/* Detect odm changes */
		if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe
			&& old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx)
				|| (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe)
				|| (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe)
				|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
			new_pipe->update_flags.bits.odm = 1;

		/* Detect global sync changes */
		if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
				|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
				|| old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset
				|| old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width)
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
			|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
			|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
				&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs;
		struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs;
		struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs;

		/* Detect pipe interdependent updates */
		if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch ||
				old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch ||
				old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c ||
				old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank ||
				old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank ||
				old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip ||
				old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l ||
				old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c ||
				old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l ||
				old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l ||
				old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 ||
				old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 ||
				old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank ||
				old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) {
			old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch;
			old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch;
			old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c;
			old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank;
			old_dlg_attr.dst_y_per_row_vblank = new_dlg_attr->dst_y_per_row_vblank;
			old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip;
			old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l;
			old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c;
			old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l;
			old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l;
			old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0;
			old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1;
			old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank;
			old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) ||
				memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) ||
				memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}
}

static void dcn20_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	bool viewport_changed = false;

	if (pipe_ctx->update_flags.bits.dppclk)
		dpp->funcs->dpp_dppclk_control(dpp, false, true);

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is a common block shared by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);

		if (hubp->funcs->set_unbounded_requesting)
			hubp->funcs->set_unbounded_requesting(hubp, pipe_ctx->unbounded_req);
	}
	if (pipe_ctx->update_flags.bits.hubp_interdependent)
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.input_csc_change ||
			plane_state->update_flags.bits.color_space_change ||
			plane_state->update_flags.bits.coeff_reduction_change) {
		struct dc_bias_and_scale bns_params = {0};

		// program the input csc
		dpp->funcs->dpp_setup(dpp,
				plane_state->format,
				EXPANSION_MODE_ZERO,
				plane_state->input_csc_color_matrix,
				plane_state->color_space,
				NULL);

		if (dpp->funcs->dpp_program_bias_and_scale) {
			//TODO :for CNVC set scale and bias registers if necessary
			build_prescale_params(&bns_params, plane_state);
			dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
		}
	}

	if (pipe_ctx->update_flags.bits.mpcc
			|| pipe_ctx->update_flags.bits.plane_changed
			|| plane_state->update_flags.bits.global_alpha_change
			|| plane_state->update_flags.bits.per_pixel_alpha_change) {
		// MPCC inst is equal to pipe index in practice
		int mpcc_inst = hubp->inst;
		int opp_inst;
		int opp_count = dc->res_pool->pipe_count;

		for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
			if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
				dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
				dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
				break;
			}
		}
		hws->funcs.update_mpcc(dc, pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.scaler ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			pipe_ctx->stream->update_flags.bits.scaling) {
		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
		/* scaler configuration */
		pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
				pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
	}

	if (pipe_ctx->update_flags.bits.viewport ||
			(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
			(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
			(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {

		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
		viewport_changed = true;
	}

	/* Any updates are handled in dc interface, just need to apply existing for plane enable */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
			pipe_ctx->update_flags.bits.scaler ||
			viewport_changed == true) &&
			pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	/* Any updates are handled in dc interface, just need
	 * to apply existing for plane enable / opp change */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
			|| pipe_ctx->stream->update_flags.bits.gamut_remap
			|| pipe_ctx->stream->update_flags.bits.out_csc) {
		/* dpp/cm gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		/*call the dcn2 method which uses mpc csc*/
		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				hubp->opp_id);
	}

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->update_flags.bits.opp_changed ||
			plane_state->update_flags.bits.pixel_format_change ||
			plane_state->update_flags.bits.horizontal_mirror_change ||
			plane_state->update_flags.bits.rotation_change ||
			plane_state->update_flags.bits.swizzle_change ||
			plane_state->update_flags.bits.dcc_change ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.plane_size_change) {
		struct plane_size size = plane_state->plane_size;

		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			0);
		hubp->power_gated = false;
	}

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			plane_state->update_flags.bits.addr_update)
		hws->funcs.update_plane_addr(dc, pipe_ctx);



	if (pipe_ctx->update_flags.bits.enable)
		hubp->funcs->set_blank(hubp, false);
}


static void dcn20_program_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	/* Only need to unblank on top pipe */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
			&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
		hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
			&& !pipe_ctx->prev_odm_pipe) {

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				pipe_ctx->pipe_dlg_param.vready_offset,
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
	}

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		dcn20_enable_plane(dc, pipe_ctx, context);
		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size)
		dc->res_pool->hubbub->funcs->program_det_size(
			dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);

	if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
		dcn20_update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->update_flags.bits.enable
			|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interaction between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
			|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}
}

void dcn20_program_front_end_for_ctx(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	/* Carry over GSL groups in case the context is changing. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe_ctx =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream == old_pipe_ctx->stream)
			pipe_ctx->stream_res.gsl_group =
				old_pipe_ctx->stream_res.gsl_group;
	}

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
				ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
		}
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
				&context->res_ctx.pipe_ctx[i]);

	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				&& !context->res_ctx.pipe_ctx[i].top_pipe
				&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
				&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);


	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
				|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			if (hubbub->funcs->program_det_size && context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
				hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
Start with 1714 * top pipe and program all pipes that follow in order 1715 */ 1716 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1717 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1718 1719 if (pipe->plane_state && !pipe->top_pipe) { 1720 while (pipe) { 1721 if (hws->funcs.program_pipe) 1722 hws->funcs.program_pipe(dc, pipe, context); 1723 else 1724 dcn20_program_pipe(dc, pipe, context); 1725 1726 pipe = pipe->bottom_pipe; 1727 } 1728 } 1729 /* Program secondary blending tree and writeback pipes */ 1730 pipe = &context->res_ctx.pipe_ctx[i]; 1731 if (!pipe->top_pipe && !pipe->prev_odm_pipe 1732 && pipe->stream && pipe->stream->num_wb_info > 0 1733 && (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw) 1734 || pipe->stream->update_flags.raw) 1735 && hws->funcs.program_all_writeback_pipes_in_tree) 1736 hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); 1737 } 1738 } 1739 1740 void dcn20_post_unlock_program_front_end( 1741 struct dc *dc, 1742 struct dc_state *context) 1743 { 1744 int i; 1745 const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; 1746 struct dce_hwseq *hwseq = dc->hwseq; 1747 1748 DC_LOGGER_INIT(dc->ctx->logger); 1749 1750 for (i = 0; i < dc->res_pool->pipe_count; i++) 1751 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) 1752 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 1753 1754 /* 1755 * If we are enabling a pipe, we need to wait for pending clear as this is a critical 1756 * part of the enable operation otherwise, DM may request an immediate flip which 1757 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which 1758 * is unsupported on DCN. 1759 */ 1760 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1761 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1762 1763 if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { 1764 struct hubp *hubp = pipe->plane_res.hubp; 1765 int j = 0; 1766 1767 for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 1768 && hubp->funcs->hubp_is_flip_pending(hubp); j++) 1769 mdelay(1); 1770 } 1771 } 1772 1773 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1774 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1775 struct pipe_ctx *mpcc_pipe; 1776 1777 if (pipe->vtp_locked) { 1778 dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp); 1779 pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true); 1780 pipe->vtp_locked = false; 1781 1782 for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 1783 mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true); 1784 1785 for (i = 0; i < dc->res_pool->pipe_count; i++) 1786 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) 1787 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 1788 } 1789 } 1790 /* WA to apply WM setting*/ 1791 if (hwseq->wa.DEGVIDCN21) 1792 dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); 1793 1794 1795 /* WA for stutter underflow during MPO transitions when adding 2nd plane */ 1796 if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) { 1797 1798 if (dc->current_state->stream_status[0].plane_count == 1 && 1799 context->stream_status[0].plane_count > 1) { 1800 1801 struct timing_generator *tg = dc->res_pool->timing_generators[0]; 1802 1803 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false); 1804 1805 
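			/* Record that the workaround is in effect and on which frame it
			 * was applied so self-refresh can be re-allowed once the MPO
			 * transition has completed.
			 */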
hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true; 1806 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg); 1807 } 1808 } 1809 } 1810 1811 void dcn20_prepare_bandwidth( 1812 struct dc *dc, 1813 struct dc_state *context) 1814 { 1815 struct hubbub *hubbub = dc->res_pool->hubbub; 1816 1817 dc->clk_mgr->funcs->update_clocks( 1818 dc->clk_mgr, 1819 context, 1820 false); 1821 1822 /* program dchubbub watermarks */ 1823 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub, 1824 &context->bw_ctx.bw.dcn.watermarks, 1825 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1826 false); 1827 /* decrease compbuf size */ 1828 if (hubbub->funcs->program_compbuf_size) 1829 hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, false); 1830 } 1831 1832 void dcn20_optimize_bandwidth( 1833 struct dc *dc, 1834 struct dc_state *context) 1835 { 1836 struct hubbub *hubbub = dc->res_pool->hubbub; 1837 1838 /* program dchubbub watermarks */ 1839 hubbub->funcs->program_watermarks(hubbub, 1840 &context->bw_ctx.bw.dcn.watermarks, 1841 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 1842 true); 1843 1844 dc->clk_mgr->funcs->update_clocks( 1845 dc->clk_mgr, 1846 context, 1847 true); 1848 /* increase compbuf size */ 1849 if (hubbub->funcs->program_compbuf_size) 1850 hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); 1851 } 1852 1853 bool dcn20_update_bandwidth( 1854 struct dc *dc, 1855 struct dc_state *context) 1856 { 1857 int i; 1858 struct dce_hwseq *hws = dc->hwseq; 1859 1860 /* recalculate DML parameters */ 1861 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) 1862 return false; 1863 1864 /* apply updated bandwidth parameters */ 1865 dc->hwss.prepare_bandwidth(dc, context); 1866 1867 /* update hubp configs for all pipes */ 1868 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1869 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1870 1871 if (pipe_ctx->plane_state == NULL) 1872 continue; 1873 1874 if (pipe_ctx->top_pipe == NULL) { 1875 bool blank = !is_pipe_tree_visible(pipe_ctx); 1876 1877 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1878 pipe_ctx->stream_res.tg, 1879 pipe_ctx->pipe_dlg_param.vready_offset, 1880 pipe_ctx->pipe_dlg_param.vstartup_start, 1881 pipe_ctx->pipe_dlg_param.vupdate_offset, 1882 pipe_ctx->pipe_dlg_param.vupdate_width); 1883 1884 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1885 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 1886 1887 if (pipe_ctx->prev_odm_pipe == NULL) 1888 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); 1889 1890 if (hws->funcs.setup_vupdate_interrupt) 1891 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); 1892 } 1893 1894 pipe_ctx->plane_res.hubp->funcs->hubp_setup( 1895 pipe_ctx->plane_res.hubp, 1896 &pipe_ctx->dlg_regs, 1897 &pipe_ctx->ttu_regs, 1898 &pipe_ctx->rq_regs, 1899 &pipe_ctx->pipe_dlg_param); 1900 } 1901 1902 return true; 1903 } 1904 1905 void dcn20_enable_writeback( 1906 struct dc *dc, 1907 struct dc_writeback_info *wb_info, 1908 struct dc_state *context) 1909 { 1910 struct dwbc *dwb; 1911 struct mcif_wb *mcif_wb; 1912 struct timing_generator *optc; 1913 1914 ASSERT(wb_info->dwb_pipe_inst < MAX_DWB_PIPES); 1915 ASSERT(wb_info->wb_enabled); 1916 dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 1917 mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; 1918 1919 /* set the OPTC source mux */ 1920 optc = 
dc->res_pool->timing_generators[dwb->otg_inst]; 1921 optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); 1922 /* set MCIF_WB buffer and arbitration configuration */ 1923 mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); 1924 mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); 1925 /* Enable MCIF_WB */ 1926 mcif_wb->funcs->enable_mcif(mcif_wb); 1927 /* Enable DWB */ 1928 dwb->funcs->enable(dwb, &wb_info->dwb_params); 1929 /* TODO: add sequence to enable/disable warmup */ 1930 } 1931 1932 void dcn20_disable_writeback( 1933 struct dc *dc, 1934 unsigned int dwb_pipe_inst) 1935 { 1936 struct dwbc *dwb; 1937 struct mcif_wb *mcif_wb; 1938 1939 ASSERT(dwb_pipe_inst < MAX_DWB_PIPES); 1940 dwb = dc->res_pool->dwbc[dwb_pipe_inst]; 1941 mcif_wb = dc->res_pool->mcif_wb[dwb_pipe_inst]; 1942 1943 dwb->funcs->disable(dwb); 1944 mcif_wb->funcs->disable_mcif(mcif_wb); 1945 } 1946 1947 bool dcn20_wait_for_blank_complete( 1948 struct output_pixel_processor *opp) 1949 { 1950 int counter; 1951 1952 for (counter = 0; counter < 1000; counter++) { 1953 if (opp->funcs->dpg_is_blanked(opp)) 1954 break; 1955 1956 udelay(100); 1957 } 1958 1959 if (counter == 1000) { 1960 dm_error("DC: failed to blank crtc!\n"); 1961 return false; 1962 } 1963 1964 return true; 1965 } 1966 1967 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx) 1968 { 1969 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1970 1971 if (!hubp) 1972 return false; 1973 return hubp->funcs->dmdata_status_done(hubp); 1974 } 1975 1976 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 1977 { 1978 struct dce_hwseq *hws = dc->hwseq; 1979 1980 if (pipe_ctx->stream_res.dsc) { 1981 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1982 1983 hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); 1984 while (odm_pipe) { 1985 hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); 1986 odm_pipe = odm_pipe->next_odm_pipe; 1987 } 1988 } 1989 } 1990 1991 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx) 1992 { 1993 struct dce_hwseq *hws = dc->hwseq; 1994 1995 if (pipe_ctx->stream_res.dsc) { 1996 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1997 1998 hws->funcs.dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false); 1999 while (odm_pipe) { 2000 hws->funcs.dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); 2001 odm_pipe = odm_pipe->next_odm_pipe; 2002 } 2003 } 2004 } 2005 2006 void dcn20_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) 2007 { 2008 struct dc_dmdata_attributes attr = { 0 }; 2009 struct hubp *hubp = pipe_ctx->plane_res.hubp; 2010 2011 attr.dmdata_mode = DMDATA_HW_MODE; 2012 attr.dmdata_size = 2013 dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 
32 : 36; 2014 attr.address.quad_part = 2015 pipe_ctx->stream->dmdata_address.quad_part; 2016 attr.dmdata_dl_delta = 0; 2017 attr.dmdata_qos_mode = 0; 2018 attr.dmdata_qos_level = 0; 2019 attr.dmdata_repeat = 1; /* always repeat */ 2020 attr.dmdata_updated = 1; 2021 attr.dmdata_sw_data = NULL; 2022 2023 hubp->funcs->dmdata_set_attributes(hubp, &attr); 2024 } 2025 2026 void dcn20_init_vm_ctx( 2027 struct dce_hwseq *hws, 2028 struct dc *dc, 2029 struct dc_virtual_addr_space_config *va_config, 2030 int vmid) 2031 { 2032 struct dcn_hubbub_virt_addr_config config; 2033 2034 if (vmid == 0) { 2035 ASSERT(0); /* VMID cannot be 0 for vm context */ 2036 return; 2037 } 2038 2039 config.page_table_start_addr = va_config->page_table_start_addr; 2040 config.page_table_end_addr = va_config->page_table_end_addr; 2041 config.page_table_block_size = va_config->page_table_block_size_in_bytes; 2042 config.page_table_depth = va_config->page_table_depth; 2043 config.page_table_base_addr = va_config->page_table_base_addr; 2044 2045 dc->res_pool->hubbub->funcs->init_vm_ctx(dc->res_pool->hubbub, &config, vmid); 2046 } 2047 2048 int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) 2049 { 2050 struct dcn_hubbub_phys_addr_config config; 2051 2052 config.system_aperture.fb_top = pa_config->system_aperture.fb_top; 2053 config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; 2054 config.system_aperture.fb_base = pa_config->system_aperture.fb_base; 2055 config.system_aperture.agp_top = pa_config->system_aperture.agp_top; 2056 config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; 2057 config.system_aperture.agp_base = pa_config->system_aperture.agp_base; 2058 config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; 2059 config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; 2060 config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; 2061 config.page_table_default_page_addr = pa_config->page_table_default_page_addr; 2062 2063 return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); 2064 } 2065 2066 static bool patch_address_for_sbs_tb_stereo( 2067 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr) 2068 { 2069 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2070 bool sec_split = pipe_ctx->top_pipe && 2071 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; 2072 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO && 2073 (pipe_ctx->stream->timing.timing_3d_format == 2074 TIMING_3D_FORMAT_SIDE_BY_SIDE || 2075 pipe_ctx->stream->timing.timing_3d_format == 2076 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) { 2077 *addr = plane_state->address.grph_stereo.left_addr; 2078 plane_state->address.grph_stereo.left_addr = 2079 plane_state->address.grph_stereo.right_addr; 2080 return true; 2081 } 2082 2083 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE && 2084 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) { 2085 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO; 2086 plane_state->address.grph_stereo.right_addr = 2087 plane_state->address.grph_stereo.left_addr; 2088 plane_state->address.grph_stereo.right_meta_addr = 2089 plane_state->address.grph_stereo.left_meta_addr; 2090 } 2091 return false; 2092 } 2093 2094 void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) 2095 { 2096 bool addr_patched = false; 2097 PHYSICAL_ADDRESS_LOC addr; 2098 struct dc_plane_state 
*plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	// Call Helper to track VMID use
	vm_helper_mark_vmid_used(dc->vm_helper, plane_state->address.vmid, pipe_ctx->plane_res.hubp->inst);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}

void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = { { 0 } };
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;
	struct pipe_ctx *odm_pipe;

	params.opp_cnt = 1;
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		params.opp_cnt++;
	}
	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (is_dp_128b_132b_signal(pipe_ctx)) {
		/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
				pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
	}
}

void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0)
		start_line = 0;

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
}

static void dcn20_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
		link = pipe_ctx->stream->link;
		/* DPMS may have already disabled the stream, or dpms_off may be
		 * stale because of the fastboot feature: when the system resumes
		 * from S4 with only the second screen active, dpms_off is true
		 * even though VBIOS has lit up eDP, so check the link status too.
		 */
		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
			core_link_disable_stream(pipe_ctx);
		else if (pipe_ctx->stream_res.audio)
			dc->hwss.disable_audio_stream(pipe_ctx);

		/* free acquired resources */
		if (pipe_ctx->stream_res.audio) {
			/* disable az_endpoint */
			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

			/* free audio */
			if (dc->caps.dynamic_audio == true) {
				/* audio endpoints are arbitrated dynamically, so free
				 * the resource and reset is_audio_acquired
				 */
				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
						pipe_ctx->stream_res.audio, false);
				pipe_ctx->stream_res.audio = NULL;
			}
		}
	}
	else if (pipe_ctx->stream_res.dsc) {
		dp_set_dsc_enable(pipe_ctx, false);
	}

	/* By the caller's loop, the parent pipe (pipe 0) is reset last. The
	 * back end is shared by all pipes and is only disabled when the
	 * parent pipe is disabled.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
			pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}

void dcn20_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End */
	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}

void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
{
	struct mpc *mpc = dc->res_pool->mpc;

	// input to MPCC is always RGB, by default leave black_color at 0
	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
		get_hdr_visual_confirm_color(pipe_ctx, color);
	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
		get_surface_visual_confirm_color(pipe_ctx, color);
	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
		get_mpctree_visual_confirm_color(pipe_ctx, color);
	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
		get_surface_tile_visual_confirm_color(pipe_ctx, color);

	if (mpc->funcs->set_bg_color)
		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
}

void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = { {0} };
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	if (per_pixel_alpha)
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
	else
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	blnd_cfg.background_color_bpc = 4;
	blnd_cfg.bottom_gain_mode = 0;
	blnd_cfg.top_gain = 0x1f000;
	blnd_cfg.bottom_inside_gain = 0x1f000;
	blnd_cfg.bottom_outside_gain = 0x1f000;
	blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
	if (pipe_ctx->plane_state->format
			== SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
		blnd_cfg.pre_multiplied_alpha = false;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This causes us to pick mpcc1 here,
	 * which causes a pstate hang for a yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, there is no need to touch the MPC tree */
	if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
			!pipe_ctx->update_flags.bits.mpcc) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);

	ASSERT(new_mpcc != NULL);
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}

void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
	enum dc_lane_count lane_count =
		pipe_ctx->stream->link->cur_link_settings.lane_count;

	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link *link = pipe_ctx->stream->link;

	uint32_t active_total_with_borders;
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct link_encoder *link_enc;

	if (link->is_dig_mapping_flexible &&
			link->dc->res_pool->funcs->link_encs_assign)
		link_enc = pipe_ctx->stream->link_enc;
	else
		link_enc = link->link_enc;

	/* For MST, multiple streams go over a single link. Connect the DIG
	 * back end to the front end in enable_stream and disconnect them in
	 * disable_stream; this keeps the separation between stream and link
	 * clean.
	 */
	if (is_dp_128b_132b_signal(pipe_ctx)) {
		setup_dp_hpo_stream(pipe_ctx, true);
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
				pipe_ctx->stream_res.hpo_dp_stream_enc);
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
				link->hpo_dp_link_enc->inst);
	}

	if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
		link_enc->funcs->connect_dig_be_to_fe(
			link_enc, pipe_ctx->stream_res.stream_enc->id, true);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);

	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (link->dc->hwss.program_dmdata_engine)
			link->dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	link->dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	/* enable early control to avoid corruption on DP monitor */
	active_total_with_borders =
			timing->h_addressable
				+ timing->h_border_left
				+ timing->h_border_right;

	if (lane_count != 0)
		early_control = active_total_with_borders % lane_count;

	if (early_control == 0)
		early_control = lane_count;

	tg->funcs->set_early_control(tg, early_control);

	/* enable audio only within mode set */
	if (pipe_ctx->stream_res.audio != NULL) {
		if (is_dp_128b_132b_signal(pipe_ctx))
			pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc);
		else if (dc_is_dp_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
	}
}

void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	bool enable = false;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
	enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal)
			?
dmdata_dp 2455 : dmdata_hdmi; 2456 2457 /* if using dynamic meta, don't set up generic infopackets */ 2458 if (pipe_ctx->stream->dmdata_address.quad_part != 0) { 2459 pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; 2460 enable = true; 2461 } 2462 2463 if (!hubp) 2464 return; 2465 2466 if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata) 2467 return; 2468 2469 stream_enc->funcs->set_dynamic_metadata(stream_enc, enable, 2470 hubp->inst, mode); 2471 } 2472 2473 void dcn20_fpga_init_hw(struct dc *dc) 2474 { 2475 int i, j; 2476 struct dce_hwseq *hws = dc->hwseq; 2477 struct resource_pool *res_pool = dc->res_pool; 2478 struct dc_state *context = dc->current_state; 2479 2480 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) 2481 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); 2482 2483 // Initialize the dccg 2484 if (res_pool->dccg->funcs->dccg_init) 2485 res_pool->dccg->funcs->dccg_init(res_pool->dccg); 2486 2487 //Enable ability to power gate / don't force power on permanently 2488 hws->funcs.enable_power_gating_plane(hws, true); 2489 2490 // Specific to FPGA dccg and registers 2491 REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); 2492 REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); 2493 2494 hws->funcs.dccg_init(hws); 2495 2496 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); 2497 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); 2498 if (REG(REFCLK_CNTL)) 2499 REG_WRITE(REFCLK_CNTL, 0); 2500 // 2501 2502 2503 /* Blank pixel data with OPP DPG */ 2504 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2505 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2506 2507 if (tg->funcs->is_tg_enabled(tg)) 2508 dcn20_init_blank(dc, tg); 2509 } 2510 2511 for (i = 0; i < res_pool->timing_generator_count; i++) { 2512 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2513 2514 if (tg->funcs->is_tg_enabled(tg)) 2515 tg->funcs->lock(tg); 2516 } 2517 2518 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2519 struct dpp *dpp = res_pool->dpps[i]; 2520 2521 dpp->funcs->dpp_reset(dpp); 2522 } 2523 2524 /* Reset all MPCC muxes */ 2525 res_pool->mpc->funcs->mpc_init(res_pool->mpc); 2526 2527 /* initialize OPP mpc_tree parameter */ 2528 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { 2529 res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst; 2530 res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 2531 for (j = 0; j < MAX_PIPES; j++) 2532 res_pool->opps[i]->mpcc_disconnect_pending[j] = false; 2533 } 2534 2535 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2536 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2537 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2538 struct hubp *hubp = dc->res_pool->hubps[i]; 2539 struct dpp *dpp = dc->res_pool->dpps[i]; 2540 2541 pipe_ctx->stream_res.tg = tg; 2542 pipe_ctx->pipe_idx = i; 2543 2544 pipe_ctx->plane_res.hubp = hubp; 2545 pipe_ctx->plane_res.dpp = dpp; 2546 pipe_ctx->plane_res.mpcc_inst = dpp->inst; 2547 hubp->mpcc_id = dpp->inst; 2548 hubp->opp_id = OPP_ID_INVALID; 2549 hubp->power_gated = false; 2550 pipe_ctx->stream_res.opp = NULL; 2551 2552 hubp->funcs->hubp_init(hubp); 2553 2554 //dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; 2555 //dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; 2556 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; 2557 pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; 2558 /*to do*/ 2559 
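		/* Disconnect any MPCC association left over from a previous
		 * configuration so every pipe starts from a clean MPC tree.
		 */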
hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); 2560 } 2561 2562 /* initialize DWB pointer to MCIF_WB */ 2563 for (i = 0; i < res_pool->res_cap->num_dwb; i++) 2564 res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i]; 2565 2566 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2567 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2568 2569 if (tg->funcs->is_tg_enabled(tg)) 2570 tg->funcs->unlock(tg); 2571 } 2572 2573 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2574 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2575 2576 dc->hwss.disable_plane(dc, pipe_ctx); 2577 2578 pipe_ctx->stream_res.tg = NULL; 2579 pipe_ctx->plane_res.hubp = NULL; 2580 } 2581 2582 for (i = 0; i < dc->res_pool->timing_generator_count; i++) { 2583 struct timing_generator *tg = dc->res_pool->timing_generators[i]; 2584 2585 tg->funcs->tg_init(tg); 2586 } 2587 2588 if (dc->res_pool->hubbub->funcs->init_crb) 2589 dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); 2590 } 2591 #ifndef TRIM_FSFT 2592 bool dcn20_optimize_timing_for_fsft(struct dc *dc, 2593 struct dc_crtc_timing *timing, 2594 unsigned int max_input_rate_in_khz) 2595 { 2596 unsigned int old_v_front_porch; 2597 unsigned int old_v_total; 2598 unsigned int max_input_rate_in_100hz; 2599 unsigned long long new_v_total; 2600 2601 max_input_rate_in_100hz = max_input_rate_in_khz * 10; 2602 if (max_input_rate_in_100hz < timing->pix_clk_100hz) 2603 return false; 2604 2605 old_v_total = timing->v_total; 2606 old_v_front_porch = timing->v_front_porch; 2607 2608 timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; 2609 timing->pix_clk_100hz = max_input_rate_in_100hz; 2610 2611 new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz); 2612 2613 timing->v_total = new_v_total; 2614 timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total); 2615 return true; 2616 } 2617 #endif 2618 2619 void dcn20_set_disp_pattern_generator(const struct dc *dc, 2620 struct pipe_ctx *pipe_ctx, 2621 enum controller_dp_test_pattern test_pattern, 2622 enum controller_dp_color_space color_space, 2623 enum dc_color_depth color_depth, 2624 const struct tg_color *solid_color, 2625 int width, int height, int offset) 2626 { 2627 pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern, 2628 color_space, color_depth, solid_color, width, height, offset); 2629 } 2630
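
/*
 * Illustrative only (kept out of the build with #if 0): a minimal sketch of
 * how a hardware-sequencer client could use dcn20_set_disp_pattern_generator()
 * above to blank a pipe to solid black, loosely mirroring what the
 * blank_pixel_data path does. The helper name example_blank_pipe_black() is
 * hypothetical, the enum values are the ones used elsewhere in DC for
 * solid-color blanking, and the sketch ignores ODM splits and border pixels.
 */
#if 0
static void example_blank_pipe_black(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct tg_color black_color = {0};	/* all components zero == black */

	dcn20_set_disp_pattern_generator(dc, pipe_ctx,
			CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
			CONTROLLER_DP_COLOR_SPACE_UDEFINED,
			stream->timing.display_color_depth,
			&black_color,
			stream->timing.h_addressable,
			stream->timing.v_addressable,
			0);
}
#endif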