1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 */ 24 25 #include "dm_services.h" 26 27 #include "dc.h" 28 29 #include "core_status.h" 30 #include "core_types.h" 31 #include "hw_sequencer.h" 32 #include "dce/dce_hwseq.h" 33 34 #include "resource.h" 35 36 #include "clk_mgr.h" 37 #include "clock_source.h" 38 #include "dc_bios_types.h" 39 40 #include "bios_parser_interface.h" 41 #include "bios/bios_parser_helper.h" 42 #include "include/irq_service_interface.h" 43 #include "transform.h" 44 #include "dmcu.h" 45 #include "dpp.h" 46 #include "timing_generator.h" 47 #include "abm.h" 48 #include "virtual/virtual_link_encoder.h" 49 #include "hubp.h" 50 51 #include "link_hwss.h" 52 #include "link_encoder.h" 53 #include "link_enc_cfg.h" 54 55 #include "dc_link.h" 56 #include "dc_link_ddc.h" 57 #include "dm_helpers.h" 58 #include "mem_input.h" 59 60 #include "dc_link_dp.h" 61 #include "dc_dmub_srv.h" 62 63 #include "dsc.h" 64 65 #include "vm_helper.h" 66 67 #include "dce/dce_i2c.h" 68 69 #include "dmub/dmub_srv.h" 70 71 #include "i2caux_interface.h" 72 73 #include "dce/dmub_psr.h" 74 75 #include "dce/dmub_hw_lock_mgr.h" 76 77 #include "dc_trace.h" 78 79 #include "dce/dmub_outbox.h" 80 81 #define CTX \ 82 dc->ctx 83 84 #define DC_LOGGER \ 85 dc->ctx->logger 86 87 static const char DC_BUILD_ID[] = "production-build"; 88 89 /** 90 * DOC: Overview 91 * 92 * DC is the OS-agnostic component of the amdgpu DC driver. 93 * 94 * DC maintains and validates a set of structs representing the state of the 95 * driver and writes that state to AMD hardware 96 * 97 * Main DC HW structs: 98 * 99 * struct dc - The central struct. One per driver. Created on driver load, 100 * destroyed on driver unload. 101 * 102 * struct dc_context - One per driver. 103 * Used as a backpointer by most other structs in dc. 104 * 105 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 106 * plugpoints). Created on driver load, destroyed on driver unload. 107 * 108 * struct dc_sink - One per display. Created on boot or hotplug. 109 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 110 * (the display directly attached). It may also have one or more remote 111 * sinks (in the Multi-Stream Transport case) 112 * 113 * struct resource_pool - One per driver. Represents the hw blocks not in the 114 * main pipeline. Not directly accessible by dm. 115 * 116 * Main dc state structs: 117 * 118 * These structs can be created and destroyed as needed. There is a full set of 119 * these structs in dc->current_state representing the currently programmed state. 120 * 121 * struct dc_state - The global DC state to track global state information, 122 * such as bandwidth values. 123 * 124 * struct dc_stream_state - Represents the hw configuration for the pipeline from 125 * a framebuffer to a display. Maps one-to-one with dc_sink. 126 * 127 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, 128 * and may have more in the Multi-Plane Overlay case. 129 * 130 * struct resource_context - Represents the programmable state of everything in 131 * the resource_pool. Not directly accessible by dm. 132 * 133 * struct pipe_ctx - A member of struct resource_context. Represents the 134 * internal hardware pipeline components. Each dc_plane_state has either 135 * one or two (in the pipe-split case). 136 */ 137 138 /* Private functions */ 139 140 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) 141 { 142 if (new > *original) 143 *original = new; 144 } 145 146 static void destroy_links(struct dc *dc) 147 { 148 uint32_t i; 149 150 for (i = 0; i < dc->link_count; i++) { 151 if (NULL != dc->links[i]) 152 link_destroy(&dc->links[i]); 153 } 154 } 155 156 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 157 { 158 int i; 159 uint32_t count = 0; 160 161 for (i = 0; i < num_links; i++) { 162 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 163 links[i]->is_internal_display) 164 count++; 165 } 166 167 return count; 168 } 169 170 static int get_seamless_boot_stream_count(struct dc_state *ctx) 171 { 172 uint8_t i; 173 uint8_t seamless_boot_stream_count = 0; 174 175 for (i = 0; i < ctx->stream_count; i++) 176 if (ctx->streams[i]->apply_seamless_boot_optimization) 177 seamless_boot_stream_count++; 178 179 return seamless_boot_stream_count; 180 } 181 182 static bool create_links( 183 struct dc *dc, 184 uint32_t num_virtual_links) 185 { 186 int i; 187 int connectors_num; 188 struct dc_bios *bios = dc->ctx->dc_bios; 189 190 dc->link_count = 0; 191 192 connectors_num = bios->funcs->get_connectors_number(bios); 193 194 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 195 196 if (connectors_num > ENUM_ID_COUNT) { 197 dm_error( 198 "DC: Number of connectors %d exceeds maximum of %d!\n", 199 connectors_num, 200 ENUM_ID_COUNT); 201 return false; 202 } 203 204 dm_output_to_console( 205 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 206 __func__, 207 connectors_num, 208 num_virtual_links); 209 210 for (i = 0; i < connectors_num; i++) { 211 struct link_init_data link_init_params = {0}; 212 struct dc_link *link; 213 214 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 215 216 link_init_params.ctx = dc->ctx; 217 /* next BIOS object table connector */ 218 link_init_params.connector_index = i; 219 link_init_params.link_index = dc->link_count; 220 link_init_params.dc = dc; 221 link = link_create(&link_init_params); 222 223 if (link) { 224 dc->links[dc->link_count] = link; 225 link->dc = dc; 226 ++dc->link_count; 227 } 228 } 229 230 DC_LOG_DC("BIOS object table - end"); 231 232 /* Create a link for each usb4 dpia port */ 233 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { 234 struct link_init_data link_init_params = {0}; 235 struct dc_link *link; 236 237 link_init_params.ctx = dc->ctx; 238 link_init_params.connector_index = i; 239 link_init_params.link_index = dc->link_count; 240 link_init_params.dc = dc; 241 link_init_params.is_dpia_link = true; 242 243 link = link_create(&link_init_params); 244 if (link) { 245 dc->links[dc->link_count] = link; 246 link->dc = dc; 247 ++dc->link_count; 248 } 249 } 250 251 for (i = 0; i < num_virtual_links; i++) { 252 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 253 struct encoder_init_data enc_init = {0}; 254 255 if (link == NULL) { 256 BREAK_TO_DEBUGGER(); 257 goto failed_alloc; 258 } 259 260 link->link_index = dc->link_count; 261 dc->links[dc->link_count] = link; 262 dc->link_count++; 263 264 link->ctx = dc->ctx; 265 link->dc = dc; 266 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 267 link->link_id.type = OBJECT_TYPE_CONNECTOR; 268 link->link_id.id = CONNECTOR_ID_VIRTUAL; 269 link->link_id.enum_id = ENUM_ID_1; 270 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 271 272 if (!link->link_enc) { 273 BREAK_TO_DEBUGGER(); 274 goto failed_alloc; 275 } 276 277 link->link_status.dpcd_caps = &link->dpcd_caps; 278 279 enc_init.ctx = dc->ctx; 280 enc_init.channel = CHANNEL_ID_UNKNOWN; 281 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 282 enc_init.transmitter = TRANSMITTER_UNKNOWN; 283 enc_init.connector = link->link_id; 284 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 285 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 286 enc_init.encoder.enum_id = ENUM_ID_1; 287 virtual_link_encoder_construct(link->link_enc, &enc_init); 288 } 289 290 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 291 292 return true; 293 294 failed_alloc: 295 return false; 296 } 297 298 /* Create additional DIG link encoder objects if fewer than the platform 299 * supports were created during link construction. This can happen if the 300 * number of physical connectors is less than the number of DIGs. 301 */ 302 static bool create_link_encoders(struct dc *dc) 303 { 304 bool res = true; 305 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 306 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 307 int i; 308 309 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 310 * link encoders and physical display endpoints and does not require 311 * additional link encoder objects. 312 */ 313 if (num_usb4_dpia == 0) 314 return res; 315 316 /* Create as many link encoder objects as the platform supports. DPIA 317 * endpoints can be programmably mapped to any DIG. 318 */ 319 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { 320 for (i = 0; i < num_dig_link_enc; i++) { 321 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 322 323 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { 324 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, 325 (enum engine_id)(ENGINE_ID_DIGA + i)); 326 if (link_enc) { 327 dc->res_pool->link_encoders[i] = link_enc; 328 dc->res_pool->dig_link_enc_count++; 329 } else { 330 res = false; 331 } 332 } 333 } 334 } 335 336 return res; 337 } 338 339 /* Destroy any additional DIG link encoder objects created by 340 * create_link_encoders(). 341 * NB: Must only be called after destroy_links(). 342 */ 343 static void destroy_link_encoders(struct dc *dc) 344 { 345 unsigned int num_usb4_dpia; 346 unsigned int num_dig_link_enc; 347 int i; 348 349 if (!dc->res_pool) 350 return; 351 352 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 353 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 354 355 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 356 * link encoders and physical display endpoints and does not require 357 * additional link encoder objects. 358 */ 359 if (num_usb4_dpia == 0) 360 return; 361 362 for (i = 0; i < num_dig_link_enc; i++) { 363 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 364 365 if (link_enc) { 366 link_enc->funcs->destroy(&link_enc); 367 dc->res_pool->link_encoders[i] = NULL; 368 dc->res_pool->dig_link_enc_count--; 369 } 370 } 371 } 372 373 static struct dc_perf_trace *dc_perf_trace_create(void) 374 { 375 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); 376 } 377 378 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) 379 { 380 kfree(*perf_trace); 381 *perf_trace = NULL; 382 } 383 384 /** 385 * dc_stream_adjust_vmin_vmax: 386 * 387 * Looks up the pipe context of dc_stream_state and updates the 388 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 389 * Rate, which is a power-saving feature that targets reducing panel 390 * refresh rate while the screen is static 391 * 392 * @dc: dc reference 393 * @stream: Initial dc stream state 394 * @adjust: Updated parameters for vertical_total_min and vertical_total_max 395 */ 396 bool dc_stream_adjust_vmin_vmax(struct dc *dc, 397 struct dc_stream_state *stream, 398 struct dc_crtc_timing_adjust *adjust) 399 { 400 int i; 401 402 stream->adjust.v_total_max = adjust->v_total_max; 403 stream->adjust.v_total_mid = adjust->v_total_mid; 404 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; 405 stream->adjust.v_total_min = adjust->v_total_min; 406 407 for (i = 0; i < MAX_PIPES; i++) { 408 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 409 410 if (pipe->stream == stream && pipe->stream_res.tg) { 411 dc->hwss.set_drr(&pipe, 412 1, 413 *adjust); 414 415 return true; 416 } 417 } 418 return false; 419 } 420 421 /** 422 * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal 423 * 424 * @dc: [in] dc reference 425 * @stream: [in] Initial dc stream state 426 * @adjust: [in] Updated parameters for vertical_total_min and 427 * 428 * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used 429 * by DRR (Dynamic Refresh Rate) 430 */ 431 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 432 struct dc_stream_state *stream, 433 uint32_t *refresh_rate) 434 { 435 bool status = false; 436 437 int i = 0; 438 439 for (i = 0; i < MAX_PIPES; i++) { 440 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 441 442 if (pipe->stream == stream && pipe->stream_res.tg) { 443 /* Only execute if a function pointer has been defined for 444 * the DC version in question 445 */ 446 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { 447 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); 448 449 status = true; 450 451 break; 452 } 453 } 454 } 455 456 return status; 457 } 458 459 bool dc_stream_get_crtc_position(struct dc *dc, 460 struct dc_stream_state **streams, int num_streams, 461 unsigned int *v_pos, unsigned int *nom_v_pos) 462 { 463 /* TODO: Support multiple streams */ 464 const struct dc_stream_state *stream = streams[0]; 465 int i; 466 bool ret = false; 467 struct crtc_position position; 468 469 for (i = 0; i < MAX_PIPES; i++) { 470 struct pipe_ctx *pipe = 471 &dc->current_state->res_ctx.pipe_ctx[i]; 472 473 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 474 dc->hwss.get_position(&pipe, 1, &position); 475 476 *v_pos = position.vertical_count; 477 *nom_v_pos = position.nominal_vcount; 478 ret = true; 479 } 480 } 481 return ret; 482 } 483 484 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 485 static inline void 486 dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, 487 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 488 { 489 union dmub_rb_cmd cmd = {0}; 490 491 cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; 492 cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; 493 494 if (is_stop) { 495 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 496 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE; 497 } else { 498 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 499 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; 500 cmd.secure_display.roi_info.x_start = rect->x; 501 cmd.secure_display.roi_info.y_start = rect->y; 502 cmd.secure_display.roi_info.x_end = rect->x + rect->width; 503 cmd.secure_display.roi_info.y_end = rect->y + rect->height; 504 } 505 506 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 507 dc_dmub_srv_cmd_execute(dmub_srv); 508 } 509 510 static inline void 511 dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, 512 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 513 { 514 if (is_stop) 515 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); 516 else 517 dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); 518 } 519 520 bool 521 dc_stream_forward_crc_window(struct dc *dc, 522 struct rect *rect, struct dc_stream_state *stream, bool is_stop) 523 { 524 struct dmcu *dmcu; 525 struct dc_dmub_srv *dmub_srv; 526 struct otg_phy_mux mux_mapping; 527 struct pipe_ctx *pipe; 528 int i; 529 530 for (i = 0; i < MAX_PIPES; i++) { 531 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 532 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 533 break; 534 } 535 536 /* Stream not found */ 537 if (i == MAX_PIPES) 538 return false; 539 540 mux_mapping.phy_output_num = stream->link->link_enc_hw_inst; 541 mux_mapping.otg_output_num = pipe->stream_res.tg->inst; 542 543 dmcu = dc->res_pool->dmcu; 544 dmub_srv = dc->ctx->dmub_srv; 545 546 /* forward to dmub */ 547 if (dmub_srv) 548 dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); 549 /* forward to dmcu */ 550 else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) 551 dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); 552 else 553 return false; 554 555 return true; 556 } 557 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 558 559 /** 560 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 561 * @dc: DC Object 562 * @stream: The stream to configure CRC on. 563 * @enable: Enable CRC if true, disable otherwise. 564 * @crc_window: CRC window (x/y start/end) information 565 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 566 * once. 567 * 568 * By default, only CRC0 is configured, and the entire frame is used to 569 * calculate the crc. 570 */ 571 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 572 struct crc_params *crc_window, bool enable, bool continuous) 573 { 574 int i; 575 struct pipe_ctx *pipe; 576 struct crc_params param; 577 struct timing_generator *tg; 578 579 for (i = 0; i < MAX_PIPES; i++) { 580 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 581 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 582 break; 583 } 584 /* Stream not found */ 585 if (i == MAX_PIPES) 586 return false; 587 588 /* By default, capture the full frame */ 589 param.windowa_x_start = 0; 590 param.windowa_y_start = 0; 591 param.windowa_x_end = pipe->stream->timing.h_addressable; 592 param.windowa_y_end = pipe->stream->timing.v_addressable; 593 param.windowb_x_start = 0; 594 param.windowb_y_start = 0; 595 param.windowb_x_end = pipe->stream->timing.h_addressable; 596 param.windowb_y_end = pipe->stream->timing.v_addressable; 597 598 if (crc_window) { 599 param.windowa_x_start = crc_window->windowa_x_start; 600 param.windowa_y_start = crc_window->windowa_y_start; 601 param.windowa_x_end = crc_window->windowa_x_end; 602 param.windowa_y_end = crc_window->windowa_y_end; 603 param.windowb_x_start = crc_window->windowb_x_start; 604 param.windowb_y_start = crc_window->windowb_y_start; 605 param.windowb_x_end = crc_window->windowb_x_end; 606 param.windowb_y_end = crc_window->windowb_y_end; 607 } 608 609 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 610 param.odm_mode = pipe->next_odm_pipe ? 1:0; 611 612 /* Default to the union of both windows */ 613 param.selection = UNION_WINDOW_A_B; 614 param.continuous_mode = continuous; 615 param.enable = enable; 616 617 tg = pipe->stream_res.tg; 618 619 /* Only call if supported */ 620 if (tg->funcs->configure_crc) 621 return tg->funcs->configure_crc(tg, ¶m); 622 DC_LOG_WARNING("CRC capture not supported."); 623 return false; 624 } 625 626 /** 627 * dc_stream_get_crc() - Get CRC values for the given stream. 628 * 629 * @dc: DC object. 630 * @stream: The DC stream state of the stream to get CRCs from. 631 * @r_cr: CRC value for the red component. 632 * @g_y: CRC value for the green component. 633 * @b_cb: CRC value for the blue component. 634 * 635 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 636 * 637 * Return: 638 * false if stream is not found, or if CRCs are not enabled. 639 */ 640 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 641 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 642 { 643 int i; 644 struct pipe_ctx *pipe; 645 struct timing_generator *tg; 646 647 for (i = 0; i < MAX_PIPES; i++) { 648 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 649 if (pipe->stream == stream) 650 break; 651 } 652 /* Stream not found */ 653 if (i == MAX_PIPES) 654 return false; 655 656 tg = pipe->stream_res.tg; 657 658 if (tg->funcs->get_crc) 659 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 660 DC_LOG_WARNING("CRC capture not supported."); 661 return false; 662 } 663 664 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 665 enum dc_dynamic_expansion option) 666 { 667 /* OPP FMT dyn expansion updates*/ 668 int i; 669 struct pipe_ctx *pipe_ctx; 670 671 for (i = 0; i < MAX_PIPES; i++) { 672 if (dc->current_state->res_ctx.pipe_ctx[i].stream 673 == stream) { 674 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 675 pipe_ctx->stream_res.opp->dyn_expansion = option; 676 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 677 pipe_ctx->stream_res.opp, 678 COLOR_SPACE_YCBCR601, 679 stream->timing.display_color_depth, 680 stream->signal); 681 } 682 } 683 } 684 685 void dc_stream_set_dither_option(struct dc_stream_state *stream, 686 enum dc_dither_option option) 687 { 688 struct bit_depth_reduction_params params; 689 struct dc_link *link = stream->link; 690 struct pipe_ctx *pipes = NULL; 691 int i; 692 693 for (i = 0; i < MAX_PIPES; i++) { 694 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 695 stream) { 696 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 697 break; 698 } 699 } 700 701 if (!pipes) 702 return; 703 if (option > DITHER_OPTION_MAX) 704 return; 705 706 stream->dither_option = option; 707 708 memset(¶ms, 0, sizeof(params)); 709 resource_build_bit_depth_reduction_params(stream, ¶ms); 710 stream->bit_depth_params = params; 711 712 if (pipes->plane_res.xfm && 713 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 714 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 715 pipes->plane_res.xfm, 716 pipes->plane_res.scl_data.lb_params.depth, 717 &stream->bit_depth_params); 718 } 719 720 pipes->stream_res.opp->funcs-> 721 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 722 } 723 724 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 725 { 726 int i; 727 bool ret = false; 728 struct pipe_ctx *pipes; 729 730 for (i = 0; i < MAX_PIPES; i++) { 731 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 732 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 733 dc->hwss.program_gamut_remap(pipes); 734 ret = true; 735 } 736 } 737 738 return ret; 739 } 740 741 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 742 { 743 int i; 744 bool ret = false; 745 struct pipe_ctx *pipes; 746 747 for (i = 0; i < MAX_PIPES; i++) { 748 if (dc->current_state->res_ctx.pipe_ctx[i].stream 749 == stream) { 750 751 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 752 dc->hwss.program_output_csc(dc, 753 pipes, 754 stream->output_color_space, 755 stream->csc_color_matrix.matrix, 756 pipes->stream_res.opp->inst); 757 ret = true; 758 } 759 } 760 761 return ret; 762 } 763 764 void dc_stream_set_static_screen_params(struct dc *dc, 765 struct dc_stream_state **streams, 766 int num_streams, 767 const struct dc_static_screen_params *params) 768 { 769 int i, j; 770 struct pipe_ctx *pipes_affected[MAX_PIPES]; 771 int num_pipes_affected = 0; 772 773 for (i = 0; i < num_streams; i++) { 774 struct dc_stream_state *stream = streams[i]; 775 776 for (j = 0; j < MAX_PIPES; j++) { 777 if (dc->current_state->res_ctx.pipe_ctx[j].stream 778 == stream) { 779 pipes_affected[num_pipes_affected++] = 780 &dc->current_state->res_ctx.pipe_ctx[j]; 781 } 782 } 783 } 784 785 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 786 } 787 788 static void dc_destruct(struct dc *dc) 789 { 790 // reset link encoder assignment table on destruct 791 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 792 link_enc_cfg_init(dc, dc->current_state); 793 794 if (dc->current_state) { 795 dc_release_state(dc->current_state); 796 dc->current_state = NULL; 797 } 798 799 destroy_links(dc); 800 801 destroy_link_encoders(dc); 802 803 if (dc->clk_mgr) { 804 dc_destroy_clk_mgr(dc->clk_mgr); 805 dc->clk_mgr = NULL; 806 } 807 808 dc_destroy_resource_pool(dc); 809 810 if (dc->ctx->gpio_service) 811 dal_gpio_service_destroy(&dc->ctx->gpio_service); 812 813 if (dc->ctx->created_bios) 814 dal_bios_parser_destroy(&dc->ctx->dc_bios); 815 816 dc_perf_trace_destroy(&dc->ctx->perf_trace); 817 818 kfree(dc->ctx); 819 dc->ctx = NULL; 820 821 kfree(dc->bw_vbios); 822 dc->bw_vbios = NULL; 823 824 kfree(dc->bw_dceip); 825 dc->bw_dceip = NULL; 826 827 kfree(dc->dcn_soc); 828 dc->dcn_soc = NULL; 829 830 kfree(dc->dcn_ip); 831 dc->dcn_ip = NULL; 832 833 kfree(dc->vm_helper); 834 dc->vm_helper = NULL; 835 836 } 837 838 static bool dc_construct_ctx(struct dc *dc, 839 const struct dc_init_data *init_params) 840 { 841 struct dc_context *dc_ctx; 842 enum dce_version dc_version = DCE_VERSION_UNKNOWN; 843 844 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 845 if (!dc_ctx) 846 return false; 847 848 dc_ctx->cgs_device = init_params->cgs_device; 849 dc_ctx->driver_context = init_params->driver; 850 dc_ctx->dc = dc; 851 dc_ctx->asic_id = init_params->asic_id; 852 dc_ctx->dc_sink_id_count = 0; 853 dc_ctx->dc_stream_id_count = 0; 854 dc_ctx->dce_environment = init_params->dce_environment; 855 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; 856 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; 857 858 /* Create logger */ 859 860 dc_version = resource_parse_asic_id(init_params->asic_id); 861 dc_ctx->dce_version = dc_version; 862 863 dc_ctx->perf_trace = dc_perf_trace_create(); 864 if (!dc_ctx->perf_trace) { 865 ASSERT_CRITICAL(false); 866 return false; 867 } 868 869 dc->ctx = dc_ctx; 870 871 return true; 872 } 873 874 static bool dc_construct(struct dc *dc, 875 const struct dc_init_data *init_params) 876 { 877 struct dc_context *dc_ctx; 878 struct bw_calcs_dceip *dc_dceip; 879 struct bw_calcs_vbios *dc_vbios; 880 struct dcn_soc_bounding_box *dcn_soc; 881 struct dcn_ip_params *dcn_ip; 882 883 dc->config = init_params->flags; 884 885 // Allocate memory for the vm_helper 886 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 887 if (!dc->vm_helper) { 888 dm_error("%s: failed to create dc->vm_helper\n", __func__); 889 goto fail; 890 } 891 892 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 893 894 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 895 if (!dc_dceip) { 896 dm_error("%s: failed to create dceip\n", __func__); 897 goto fail; 898 } 899 900 dc->bw_dceip = dc_dceip; 901 902 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 903 if (!dc_vbios) { 904 dm_error("%s: failed to create vbios\n", __func__); 905 goto fail; 906 } 907 908 dc->bw_vbios = dc_vbios; 909 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 910 if (!dcn_soc) { 911 dm_error("%s: failed to create dcn_soc\n", __func__); 912 goto fail; 913 } 914 915 dc->dcn_soc = dcn_soc; 916 917 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 918 if (!dcn_ip) { 919 dm_error("%s: failed to create dcn_ip\n", __func__); 920 goto fail; 921 } 922 923 dc->dcn_ip = dcn_ip; 924 925 if (!dc_construct_ctx(dc, init_params)) { 926 dm_error("%s: failed to create ctx\n", __func__); 927 goto fail; 928 } 929 930 dc_ctx = dc->ctx; 931 932 /* Resource should construct all asic specific resources. 933 * This should be the only place where we need to parse the asic id 934 */ 935 if (init_params->vbios_override) 936 dc_ctx->dc_bios = init_params->vbios_override; 937 else { 938 /* Create BIOS parser */ 939 struct bp_init_data bp_init_data; 940 941 bp_init_data.ctx = dc_ctx; 942 bp_init_data.bios = init_params->asic_id.atombios_base_address; 943 944 dc_ctx->dc_bios = dal_bios_parser_create( 945 &bp_init_data, dc_ctx->dce_version); 946 947 if (!dc_ctx->dc_bios) { 948 ASSERT_CRITICAL(false); 949 goto fail; 950 } 951 952 dc_ctx->created_bios = true; 953 } 954 955 dc->vendor_signature = init_params->vendor_signature; 956 957 /* Create GPIO service */ 958 dc_ctx->gpio_service = dal_gpio_service_create( 959 dc_ctx->dce_version, 960 dc_ctx->dce_environment, 961 dc_ctx); 962 963 if (!dc_ctx->gpio_service) { 964 ASSERT_CRITICAL(false); 965 goto fail; 966 } 967 968 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 969 if (!dc->res_pool) 970 goto fail; 971 972 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 973 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 974 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 975 976 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 977 if (!dc->clk_mgr) 978 goto fail; 979 #ifdef CONFIG_DRM_AMD_DC_DCN 980 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 981 982 if (dc->res_pool->funcs->update_bw_bounding_box) { 983 DC_FP_START(); 984 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 985 DC_FP_END(); 986 } 987 #endif 988 989 /* Creation of current_state must occur after dc->dml 990 * is initialized in dc_create_resource_pool because 991 * on creation it copies the contents of dc->dml 992 */ 993 994 dc->current_state = dc_create_state(dc); 995 996 if (!dc->current_state) { 997 dm_error("%s: failed to create validate ctx\n", __func__); 998 goto fail; 999 } 1000 1001 if (!create_links(dc, init_params->num_virtual_links)) 1002 goto fail; 1003 1004 /* Create additional DIG link encoder objects if fewer than the platform 1005 * supports were created during link construction. 1006 */ 1007 if (!create_link_encoders(dc)) 1008 goto fail; 1009 1010 dc_resource_state_construct(dc, dc->current_state); 1011 1012 return true; 1013 1014 fail: 1015 return false; 1016 } 1017 1018 static void disable_all_writeback_pipes_for_stream( 1019 const struct dc *dc, 1020 struct dc_stream_state *stream, 1021 struct dc_state *context) 1022 { 1023 int i; 1024 1025 for (i = 0; i < stream->num_wb_info; i++) 1026 stream->writeback_info[i].wb_enabled = false; 1027 } 1028 1029 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, 1030 struct dc_stream_state *stream, bool lock) 1031 { 1032 int i; 1033 1034 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1035 if (dc->hwss.interdependent_update_lock) 1036 dc->hwss.interdependent_update_lock(dc, context, lock); 1037 else { 1038 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1039 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1040 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1041 1042 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1043 if (stream == pipe_ctx->stream) { 1044 if (!pipe_ctx->top_pipe && 1045 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1046 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1047 } 1048 } 1049 } 1050 } 1051 1052 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1053 { 1054 int i, j; 1055 struct dc_state *dangling_context = dc_create_state(dc); 1056 struct dc_state *current_ctx; 1057 struct pipe_ctx *pipe; 1058 struct timing_generator *tg; 1059 1060 if (dangling_context == NULL) 1061 return; 1062 1063 dc_resource_state_copy_construct(dc->current_state, dangling_context); 1064 1065 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1066 struct dc_stream_state *old_stream = 1067 dc->current_state->res_ctx.pipe_ctx[i].stream; 1068 bool should_disable = true; 1069 bool pipe_split_change = false; 1070 1071 if ((context->res_ctx.pipe_ctx[i].top_pipe) && 1072 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) 1073 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != 1074 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; 1075 else 1076 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != 1077 dc->current_state->res_ctx.pipe_ctx[i].top_pipe; 1078 1079 for (j = 0; j < context->stream_count; j++) { 1080 if (old_stream == context->streams[j]) { 1081 should_disable = false; 1082 break; 1083 } 1084 } 1085 if (!should_disable && pipe_split_change && 1086 dc->current_state->stream_count != context->stream_count) 1087 should_disable = true; 1088 1089 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && 1090 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { 1091 struct pipe_ctx *old_pipe, *new_pipe; 1092 1093 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1094 new_pipe = &context->res_ctx.pipe_ctx[i]; 1095 1096 if (old_pipe->plane_state && !new_pipe->plane_state) 1097 should_disable = true; 1098 } 1099 1100 if (should_disable && old_stream) { 1101 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1102 tg = pipe->stream_res.tg; 1103 /* When disabling plane for a phantom pipe, we must turn on the 1104 * phantom OTG so the disable programming gets the double buffer 1105 * update. Otherwise the pipe will be left in a partially disabled 1106 * state that can result in underflow or hang when enabling it 1107 * again for different use. 1108 */ 1109 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1110 if (tg->funcs->enable_crtc) 1111 tg->funcs->enable_crtc(tg); 1112 } 1113 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1114 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1115 1116 if (dc->hwss.apply_ctx_for_surface) { 1117 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 1118 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 1119 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 1120 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1121 } 1122 if (dc->hwss.program_front_end_for_ctx) { 1123 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 1124 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 1125 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 1126 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1127 } 1128 /* We need to put the phantom OTG back into it's default (disabled) state or we 1129 * can get corruption when transition from one SubVP config to a different one. 1130 * The OTG is set to disable on falling edge of VUPDATE so the plane disable 1131 * will still get it's double buffer update. 1132 */ 1133 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1134 if (tg->funcs->disable_phantom_crtc) 1135 tg->funcs->disable_phantom_crtc(tg); 1136 } 1137 } 1138 } 1139 1140 current_ctx = dc->current_state; 1141 dc->current_state = dangling_context; 1142 dc_release_state(current_ctx); 1143 } 1144 1145 static void disable_vbios_mode_if_required( 1146 struct dc *dc, 1147 struct dc_state *context) 1148 { 1149 unsigned int i, j; 1150 1151 /* check if timing_changed, disable stream*/ 1152 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1153 struct dc_stream_state *stream = NULL; 1154 struct dc_link *link = NULL; 1155 struct pipe_ctx *pipe = NULL; 1156 1157 pipe = &context->res_ctx.pipe_ctx[i]; 1158 stream = pipe->stream; 1159 if (stream == NULL) 1160 continue; 1161 1162 // only looking for first odm pipe 1163 if (pipe->prev_odm_pipe) 1164 continue; 1165 1166 if (stream->link->local_sink && 1167 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1168 link = stream->link; 1169 } 1170 1171 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1172 unsigned int enc_inst, tg_inst = 0; 1173 unsigned int pix_clk_100hz; 1174 1175 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1176 if (enc_inst != ENGINE_ID_UNKNOWN) { 1177 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 1178 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 1179 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 1180 dc->res_pool->stream_enc[j]); 1181 break; 1182 } 1183 } 1184 1185 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1186 dc->res_pool->dp_clock_source, 1187 tg_inst, &pix_clk_100hz); 1188 1189 if (link->link_status.link_active) { 1190 uint32_t requested_pix_clk_100hz = 1191 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 1192 1193 if (pix_clk_100hz != requested_pix_clk_100hz) { 1194 core_link_disable_stream(pipe); 1195 pipe->stream->dpms_off = false; 1196 } 1197 } 1198 } 1199 } 1200 } 1201 } 1202 1203 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 1204 { 1205 int i; 1206 PERF_TRACE(); 1207 for (i = 0; i < MAX_PIPES; i++) { 1208 int count = 0; 1209 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1210 1211 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1212 continue; 1213 1214 /* Timeout 100 ms */ 1215 while (count < 100000) { 1216 /* Must set to false to start with, due to OR in update function */ 1217 pipe->plane_state->status.is_flip_pending = false; 1218 dc->hwss.update_pending_status(pipe); 1219 if (!pipe->plane_state->status.is_flip_pending) 1220 break; 1221 udelay(1); 1222 count++; 1223 } 1224 ASSERT(!pipe->plane_state->status.is_flip_pending); 1225 } 1226 PERF_TRACE(); 1227 } 1228 1229 /* Public functions */ 1230 1231 struct dc *dc_create(const struct dc_init_data *init_params) 1232 { 1233 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1234 unsigned int full_pipe_count; 1235 1236 if (!dc) 1237 return NULL; 1238 1239 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 1240 if (!dc_construct_ctx(dc, init_params)) 1241 goto destruct_dc; 1242 } else { 1243 if (!dc_construct(dc, init_params)) 1244 goto destruct_dc; 1245 1246 full_pipe_count = dc->res_pool->pipe_count; 1247 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 1248 full_pipe_count--; 1249 dc->caps.max_streams = min( 1250 full_pipe_count, 1251 dc->res_pool->stream_enc_count); 1252 1253 dc->caps.max_links = dc->link_count; 1254 dc->caps.max_audios = dc->res_pool->audio_count; 1255 dc->caps.linear_pitch_alignment = 64; 1256 1257 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 1258 1259 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; 1260 1261 if (dc->res_pool->dmcu != NULL) 1262 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 1263 } 1264 1265 dc->dcn_reg_offsets = init_params->dcn_reg_offsets; 1266 dc->nbio_reg_offsets = init_params->nbio_reg_offsets; 1267 1268 /* Populate versioning information */ 1269 dc->versions.dc_ver = DC_VER; 1270 1271 dc->build_id = DC_BUILD_ID; 1272 1273 DC_LOG_DC("Display Core initialized\n"); 1274 1275 1276 1277 return dc; 1278 1279 destruct_dc: 1280 dc_destruct(dc); 1281 kfree(dc); 1282 return NULL; 1283 } 1284 1285 static void detect_edp_presence(struct dc *dc) 1286 { 1287 struct dc_link *edp_links[MAX_NUM_EDP]; 1288 struct dc_link *edp_link = NULL; 1289 enum dc_connection_type type; 1290 int i; 1291 int edp_num; 1292 1293 get_edp_links(dc, edp_links, &edp_num); 1294 if (!edp_num) 1295 return; 1296 1297 for (i = 0; i < edp_num; i++) { 1298 edp_link = edp_links[i]; 1299 if (dc->config.edp_not_connected) { 1300 edp_link->edp_sink_present = false; 1301 } else { 1302 dc_link_detect_sink(edp_link, &type); 1303 edp_link->edp_sink_present = (type != dc_connection_none); 1304 } 1305 } 1306 } 1307 1308 void dc_hardware_init(struct dc *dc) 1309 { 1310 1311 detect_edp_presence(dc); 1312 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1313 dc->hwss.init_hw(dc); 1314 } 1315 1316 void dc_init_callbacks(struct dc *dc, 1317 const struct dc_callback_init *init_params) 1318 { 1319 #ifdef CONFIG_DRM_AMD_DC_HDCP 1320 dc->ctx->cp_psp = init_params->cp_psp; 1321 #endif 1322 } 1323 1324 void dc_deinit_callbacks(struct dc *dc) 1325 { 1326 #ifdef CONFIG_DRM_AMD_DC_HDCP 1327 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1328 #endif 1329 } 1330 1331 void dc_destroy(struct dc **dc) 1332 { 1333 dc_destruct(*dc); 1334 kfree(*dc); 1335 *dc = NULL; 1336 } 1337 1338 static void enable_timing_multisync( 1339 struct dc *dc, 1340 struct dc_state *ctx) 1341 { 1342 int i, multisync_count = 0; 1343 int pipe_count = dc->res_pool->pipe_count; 1344 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1345 1346 for (i = 0; i < pipe_count; i++) { 1347 if (!ctx->res_ctx.pipe_ctx[i].stream || 1348 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1349 continue; 1350 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1351 continue; 1352 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1353 multisync_count++; 1354 } 1355 1356 if (multisync_count > 0) { 1357 dc->hwss.enable_per_frame_crtc_position_reset( 1358 dc, multisync_count, multisync_pipes); 1359 } 1360 } 1361 1362 static void program_timing_sync( 1363 struct dc *dc, 1364 struct dc_state *ctx) 1365 { 1366 int i, j, k; 1367 int group_index = 0; 1368 int num_group = 0; 1369 int pipe_count = dc->res_pool->pipe_count; 1370 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1371 1372 for (i = 0; i < pipe_count; i++) { 1373 if (!ctx->res_ctx.pipe_ctx[i].stream 1374 || ctx->res_ctx.pipe_ctx[i].top_pipe 1375 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) 1376 continue; 1377 1378 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1379 } 1380 1381 for (i = 0; i < pipe_count; i++) { 1382 int group_size = 1; 1383 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; 1384 struct pipe_ctx *pipe_set[MAX_PIPES]; 1385 1386 if (!unsynced_pipes[i]) 1387 continue; 1388 1389 pipe_set[0] = unsynced_pipes[i]; 1390 unsynced_pipes[i] = NULL; 1391 1392 /* Add tg to the set, search rest of the tg's for ones with 1393 * same timing, add all tgs with same timing to the group 1394 */ 1395 for (j = i + 1; j < pipe_count; j++) { 1396 if (!unsynced_pipes[j]) 1397 continue; 1398 if (sync_type != TIMING_SYNCHRONIZABLE && 1399 dc->hwss.enable_vblanks_synchronization && 1400 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && 1401 resource_are_vblanks_synchronizable( 1402 unsynced_pipes[j]->stream, 1403 pipe_set[0]->stream)) { 1404 sync_type = VBLANK_SYNCHRONIZABLE; 1405 pipe_set[group_size] = unsynced_pipes[j]; 1406 unsynced_pipes[j] = NULL; 1407 group_size++; 1408 } else 1409 if (sync_type != VBLANK_SYNCHRONIZABLE && 1410 resource_are_streams_timing_synchronizable( 1411 unsynced_pipes[j]->stream, 1412 pipe_set[0]->stream)) { 1413 sync_type = TIMING_SYNCHRONIZABLE; 1414 pipe_set[group_size] = unsynced_pipes[j]; 1415 unsynced_pipes[j] = NULL; 1416 group_size++; 1417 } 1418 } 1419 1420 /* set first unblanked pipe as master */ 1421 for (j = 0; j < group_size; j++) { 1422 bool is_blanked; 1423 1424 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1425 is_blanked = 1426 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1427 else 1428 is_blanked = 1429 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1430 if (!is_blanked) { 1431 if (j == 0) 1432 break; 1433 1434 swap(pipe_set[0], pipe_set[j]); 1435 break; 1436 } 1437 } 1438 1439 for (k = 0; k < group_size; k++) { 1440 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1441 1442 status->timing_sync_info.group_id = num_group; 1443 status->timing_sync_info.group_size = group_size; 1444 if (k == 0) 1445 status->timing_sync_info.master = true; 1446 else 1447 status->timing_sync_info.master = false; 1448 1449 } 1450 1451 /* remove any other pipes that are already been synced */ 1452 if (dc->config.use_pipe_ctx_sync_logic) { 1453 /* check pipe's syncd to decide which pipe to be removed */ 1454 for (j = 1; j < group_size; j++) { 1455 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1456 group_size--; 1457 pipe_set[j] = pipe_set[group_size]; 1458 j--; 1459 } else 1460 /* link slave pipe's syncd with master pipe */ 1461 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1462 } 1463 } else { 1464 for (j = j + 1; j < group_size; j++) { 1465 bool is_blanked; 1466 1467 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1468 is_blanked = 1469 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1470 else 1471 is_blanked = 1472 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1473 if (!is_blanked) { 1474 group_size--; 1475 pipe_set[j] = pipe_set[group_size]; 1476 j--; 1477 } 1478 } 1479 } 1480 1481 if (group_size > 1) { 1482 if (sync_type == TIMING_SYNCHRONIZABLE) { 1483 dc->hwss.enable_timing_synchronization( 1484 dc, group_index, group_size, pipe_set); 1485 } else 1486 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1487 dc->hwss.enable_vblanks_synchronization( 1488 dc, group_index, group_size, pipe_set); 1489 } 1490 group_index++; 1491 } 1492 num_group++; 1493 } 1494 } 1495 1496 static bool streams_changed(struct dc *dc, 1497 struct dc_stream_state *streams[], 1498 uint8_t stream_count) 1499 { 1500 uint8_t i; 1501 1502 if (stream_count != dc->current_state->stream_count) 1503 return true; 1504 1505 for (i = 0; i < dc->current_state->stream_count; i++) { 1506 if (dc->current_state->streams[i] != streams[i]) 1507 return true; 1508 if (!streams[i]->link->link_state_valid) 1509 return true; 1510 } 1511 1512 return false; 1513 } 1514 1515 bool dc_validate_boot_timing(const struct dc *dc, 1516 const struct dc_sink *sink, 1517 struct dc_crtc_timing *crtc_timing) 1518 { 1519 struct timing_generator *tg; 1520 struct stream_encoder *se = NULL; 1521 1522 struct dc_crtc_timing hw_crtc_timing = {0}; 1523 1524 struct dc_link *link = sink->link; 1525 unsigned int i, enc_inst, tg_inst = 0; 1526 1527 /* Support seamless boot on EDP displays only */ 1528 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1529 return false; 1530 } 1531 1532 /* Check for enabled DIG to identify enabled display */ 1533 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) 1534 return false; 1535 1536 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1537 1538 if (enc_inst == ENGINE_ID_UNKNOWN) 1539 return false; 1540 1541 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1542 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1543 1544 se = dc->res_pool->stream_enc[i]; 1545 1546 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1547 dc->res_pool->stream_enc[i]); 1548 break; 1549 } 1550 } 1551 1552 // tg_inst not found 1553 if (i == dc->res_pool->stream_enc_count) 1554 return false; 1555 1556 if (tg_inst >= dc->res_pool->timing_generator_count) 1557 return false; 1558 1559 if (tg_inst != link->link_enc->preferred_engine) 1560 return false; 1561 1562 tg = dc->res_pool->timing_generators[tg_inst]; 1563 1564 if (!tg->funcs->get_hw_timing) 1565 return false; 1566 1567 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) 1568 return false; 1569 1570 if (crtc_timing->h_total != hw_crtc_timing.h_total) 1571 return false; 1572 1573 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) 1574 return false; 1575 1576 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) 1577 return false; 1578 1579 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) 1580 return false; 1581 1582 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) 1583 return false; 1584 1585 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) 1586 return false; 1587 1588 if (crtc_timing->v_total != hw_crtc_timing.v_total) 1589 return false; 1590 1591 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) 1592 return false; 1593 1594 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) 1595 return false; 1596 1597 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) 1598 return false; 1599 1600 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) 1601 return false; 1602 1603 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1604 return false; 1605 1606 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1607 if (crtc_timing->flags.DSC) 1608 return false; 1609 1610 if (dc_is_dp_signal(link->connector_signal)) { 1611 unsigned int pix_clk_100hz; 1612 uint32_t numOdmPipes = 1; 1613 uint32_t id_src[4] = {0}; 1614 1615 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1616 dc->res_pool->dp_clock_source, 1617 tg_inst, &pix_clk_100hz); 1618 1619 if (tg->funcs->get_optc_source) 1620 tg->funcs->get_optc_source(tg, 1621 &numOdmPipes, &id_src[0], &id_src[1]); 1622 1623 if (numOdmPipes == 2) 1624 pix_clk_100hz *= 2; 1625 if (numOdmPipes == 4) 1626 pix_clk_100hz *= 4; 1627 1628 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1629 // slightly due to rounding issues in 10 kHz units. 1630 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) 1631 return false; 1632 1633 if (!se->funcs->dp_get_pixel_format) 1634 return false; 1635 1636 if (!se->funcs->dp_get_pixel_format( 1637 se, 1638 &hw_crtc_timing.pixel_encoding, 1639 &hw_crtc_timing.display_color_depth)) 1640 return false; 1641 1642 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) 1643 return false; 1644 1645 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) 1646 return false; 1647 } 1648 1649 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1650 return false; 1651 } 1652 1653 if (is_edp_ilr_optimization_required(link, crtc_timing)) { 1654 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1655 return false; 1656 } 1657 1658 return true; 1659 } 1660 1661 static inline bool should_update_pipe_for_stream( 1662 struct dc_state *context, 1663 struct pipe_ctx *pipe_ctx, 1664 struct dc_stream_state *stream) 1665 { 1666 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1667 } 1668 1669 static inline bool should_update_pipe_for_plane( 1670 struct dc_state *context, 1671 struct pipe_ctx *pipe_ctx, 1672 struct dc_plane_state *plane_state) 1673 { 1674 return (pipe_ctx->plane_state == plane_state); 1675 } 1676 1677 void dc_enable_stereo( 1678 struct dc *dc, 1679 struct dc_state *context, 1680 struct dc_stream_state *streams[], 1681 uint8_t stream_count) 1682 { 1683 int i, j; 1684 struct pipe_ctx *pipe; 1685 1686 for (i = 0; i < MAX_PIPES; i++) { 1687 if (context != NULL) { 1688 pipe = &context->res_ctx.pipe_ctx[i]; 1689 } else { 1690 context = dc->current_state; 1691 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1692 } 1693 1694 for (j = 0; pipe && j < stream_count; j++) { 1695 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1696 dc->hwss.setup_stereo) 1697 dc->hwss.setup_stereo(pipe, dc); 1698 } 1699 } 1700 } 1701 1702 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1703 { 1704 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1705 enable_timing_multisync(dc, context); 1706 program_timing_sync(dc, context); 1707 } 1708 } 1709 1710 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1711 { 1712 int i; 1713 unsigned int stream_mask = 0; 1714 1715 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1716 if (context->res_ctx.pipe_ctx[i].stream) 1717 stream_mask |= 1 << i; 1718 } 1719 1720 return stream_mask; 1721 } 1722 1723 void dc_z10_restore(const struct dc *dc) 1724 { 1725 if (dc->hwss.z10_restore) 1726 dc->hwss.z10_restore(dc); 1727 } 1728 1729 void dc_z10_save_init(struct dc *dc) 1730 { 1731 if (dc->hwss.z10_save_init) 1732 dc->hwss.z10_save_init(dc); 1733 } 1734 1735 /** 1736 * dc_commit_state_no_check - Apply context to the hardware 1737 * 1738 * @dc: DC object with the current status to be updated 1739 * @context: New state that will become the current status at the end of this function 1740 * 1741 * Applies given context to the hardware and copy it into current context. 1742 * It's up to the user to release the src context afterwards. 1743 */ 1744 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1745 { 1746 struct dc_bios *dcb = dc->ctx->dc_bios; 1747 enum dc_status result = DC_ERROR_UNEXPECTED; 1748 struct pipe_ctx *pipe; 1749 int i, k, l; 1750 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1751 struct dc_state *old_state; 1752 bool subvp_prev_use = false; 1753 1754 dc_z10_restore(dc); 1755 dc_allow_idle_optimizations(dc, false); 1756 1757 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1758 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1759 1760 /* Check old context for SubVP */ 1761 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 1762 if (subvp_prev_use) 1763 break; 1764 } 1765 1766 for (i = 0; i < context->stream_count; i++) 1767 dc_streams[i] = context->streams[i]; 1768 1769 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1770 disable_vbios_mode_if_required(dc, context); 1771 dc->hwss.enable_accelerated_mode(dc, context); 1772 } 1773 1774 if (context->stream_count > get_seamless_boot_stream_count(context) || 1775 context->stream_count == 0) 1776 dc->hwss.prepare_bandwidth(dc, context); 1777 1778 /* When SubVP is active, all HW programming must be done while 1779 * SubVP lock is acquired 1780 */ 1781 if (dc->hwss.subvp_pipe_control_lock) 1782 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1783 1784 if (dc->debug.enable_double_buffered_dsc_pg_support) 1785 dc->hwss.update_dsc_pg(dc, context, false); 1786 1787 disable_dangling_plane(dc, context); 1788 /* re-program planes for existing stream, in case we need to 1789 * free up plane resource for later use 1790 */ 1791 if (dc->hwss.apply_ctx_for_surface) { 1792 for (i = 0; i < context->stream_count; i++) { 1793 if (context->streams[i]->mode_changed) 1794 continue; 1795 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1796 dc->hwss.apply_ctx_for_surface( 1797 dc, context->streams[i], 1798 context->stream_status[i].plane_count, 1799 context); /* use new pipe config in new context */ 1800 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1801 dc->hwss.post_unlock_program_front_end(dc, context); 1802 } 1803 } 1804 1805 /* Program hardware */ 1806 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1807 pipe = &context->res_ctx.pipe_ctx[i]; 1808 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1809 } 1810 1811 result = dc->hwss.apply_ctx_to_hw(dc, context); 1812 1813 if (result != DC_OK) { 1814 /* Application of dc_state to hardware stopped. */ 1815 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 1816 return result; 1817 } 1818 1819 dc_trigger_sync(dc, context); 1820 1821 /* Program all planes within new context*/ 1822 if (dc->hwss.program_front_end_for_ctx) { 1823 dc->hwss.interdependent_update_lock(dc, context, true); 1824 dc->hwss.program_front_end_for_ctx(dc, context); 1825 dc->hwss.interdependent_update_lock(dc, context, false); 1826 dc->hwss.post_unlock_program_front_end(dc, context); 1827 } 1828 1829 if (dc->hwss.commit_subvp_config) 1830 dc->hwss.commit_subvp_config(dc, context); 1831 if (dc->hwss.subvp_pipe_control_lock) 1832 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 1833 1834 for (i = 0; i < context->stream_count; i++) { 1835 const struct dc_link *link = context->streams[i]->link; 1836 1837 if (!context->streams[i]->mode_changed) 1838 continue; 1839 1840 if (dc->hwss.apply_ctx_for_surface) { 1841 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1842 dc->hwss.apply_ctx_for_surface( 1843 dc, context->streams[i], 1844 context->stream_status[i].plane_count, 1845 context); 1846 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1847 dc->hwss.post_unlock_program_front_end(dc, context); 1848 } 1849 1850 /* 1851 * enable stereo 1852 * TODO rework dc_enable_stereo call to work with validation sets? 1853 */ 1854 for (k = 0; k < MAX_PIPES; k++) { 1855 pipe = &context->res_ctx.pipe_ctx[k]; 1856 1857 for (l = 0 ; pipe && l < context->stream_count; l++) { 1858 if (context->streams[l] && 1859 context->streams[l] == pipe->stream && 1860 dc->hwss.setup_stereo) 1861 dc->hwss.setup_stereo(pipe, dc); 1862 } 1863 } 1864 1865 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 1866 context->streams[i]->timing.h_addressable, 1867 context->streams[i]->timing.v_addressable, 1868 context->streams[i]->timing.h_total, 1869 context->streams[i]->timing.v_total, 1870 context->streams[i]->timing.pix_clk_100hz / 10); 1871 } 1872 1873 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1874 1875 if (context->stream_count > get_seamless_boot_stream_count(context) || 1876 context->stream_count == 0) { 1877 /* Must wait for no flips to be pending before doing optimize bw */ 1878 wait_for_no_pipes_pending(dc, context); 1879 /* pplib is notified if disp_num changed */ 1880 dc->hwss.optimize_bandwidth(dc, context); 1881 } 1882 1883 if (dc->debug.enable_double_buffered_dsc_pg_support) 1884 dc->hwss.update_dsc_pg(dc, context, true); 1885 1886 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1887 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1888 else 1889 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1890 1891 context->stream_mask = get_stream_mask(dc, context); 1892 1893 if (context->stream_mask != dc->current_state->stream_mask) 1894 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 1895 1896 for (i = 0; i < context->stream_count; i++) 1897 context->streams[i]->mode_changed = false; 1898 1899 old_state = dc->current_state; 1900 dc->current_state = context; 1901 1902 dc_release_state(old_state); 1903 1904 dc_retain_state(dc->current_state); 1905 1906 return result; 1907 } 1908 1909 /** 1910 * dc_commit_streams - Commit current stream state 1911 * 1912 * @dc: DC object with the commit state to be configured in the hardware 1913 * @streams: Array with a list of stream state 1914 * @stream_count: Total of streams 1915 * 1916 * Function responsible for commit streams change to the hardware. 1917 * 1918 * Return: 1919 * Return DC_OK if everything work as expected, otherwise, return a dc_status 1920 * code. 1921 */ 1922 enum dc_status dc_commit_streams(struct dc *dc, 1923 struct dc_stream_state *streams[], 1924 uint8_t stream_count) 1925 { 1926 int i, j; 1927 struct dc_state *context; 1928 enum dc_status res = DC_OK; 1929 struct dc_validation_set set[MAX_STREAMS] = {0}; 1930 1931 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 1932 return res; 1933 1934 if (!streams_changed(dc, streams, stream_count)) 1935 return res; 1936 1937 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 1938 1939 for (i = 0; i < stream_count; i++) { 1940 struct dc_stream_state *stream = streams[i]; 1941 struct dc_stream_status *status = dc_stream_get_status(stream); 1942 1943 dc_stream_log(dc, stream); 1944 1945 set[i].stream = stream; 1946 1947 if (status) { 1948 set[i].plane_count = status->plane_count; 1949 for (j = 0; j < status->plane_count; j++) 1950 set[i].plane_states[j] = status->plane_states[j]; 1951 } 1952 } 1953 1954 context = dc_create_state(dc); 1955 if (!context) 1956 goto context_alloc_fail; 1957 1958 dc_resource_state_copy_construct_current(dc, context); 1959 1960 res = dc_validate_with_context(dc, set, stream_count, context, false); 1961 if (res != DC_OK) { 1962 BREAK_TO_DEBUGGER(); 1963 goto fail; 1964 } 1965 1966 res = dc_commit_state_no_check(dc, context); 1967 1968 for (i = 0; i < stream_count; i++) { 1969 for (j = 0; j < context->stream_count; j++) { 1970 if (streams[i]->stream_id == context->streams[j]->stream_id) 1971 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 1972 1973 if (dc_is_embedded_signal(streams[i]->signal)) { 1974 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 1975 1976 if (dc->hwss.is_abm_supported) 1977 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 1978 else 1979 status->is_abm_supported = true; 1980 } 1981 } 1982 } 1983 1984 fail: 1985 dc_release_state(context); 1986 1987 context_alloc_fail: 1988 1989 DC_LOG_DC("%s Finished.\n", __func__); 1990 1991 return res; 1992 } 1993 1994 /* TODO: When the transition to the new commit sequence is done, remove this 1995 * function in favor of dc_commit_streams. */ 1996 bool dc_commit_state(struct dc *dc, struct dc_state *context) 1997 { 1998 enum dc_status result = DC_ERROR_UNEXPECTED; 1999 int i; 2000 2001 /* TODO: Since change commit sequence can have a huge impact, 2002 * we decided to only enable it for DCN3x. However, as soon as 2003 * we get more confident about this change we'll need to enable 2004 * the new sequence for all ASICs. */ 2005 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 2006 result = dc_commit_streams(dc, context->streams, context->stream_count); 2007 return result == DC_OK; 2008 } 2009 2010 if (!streams_changed(dc, context->streams, context->stream_count)) 2011 return DC_OK; 2012 2013 DC_LOG_DC("%s: %d streams\n", 2014 __func__, context->stream_count); 2015 2016 for (i = 0; i < context->stream_count; i++) { 2017 struct dc_stream_state *stream = context->streams[i]; 2018 2019 dc_stream_log(dc, stream); 2020 } 2021 2022 /* 2023 * Previous validation was perfomred with fast_validation = true and 2024 * the full DML state required for hardware programming was skipped. 2025 * 2026 * Re-validate here to calculate these parameters / watermarks. 2027 */ 2028 result = dc_validate_global_state(dc, context, false); 2029 if (result != DC_OK) { 2030 DC_LOG_ERROR("DC commit global validation failure: %s (%d)", 2031 dc_status_to_str(result), result); 2032 return result; 2033 } 2034 2035 result = dc_commit_state_no_check(dc, context); 2036 2037 return (result == DC_OK); 2038 } 2039 2040 bool dc_acquire_release_mpc_3dlut( 2041 struct dc *dc, bool acquire, 2042 struct dc_stream_state *stream, 2043 struct dc_3dlut **lut, 2044 struct dc_transfer_func **shaper) 2045 { 2046 int pipe_idx; 2047 bool ret = false; 2048 bool found_pipe_idx = false; 2049 const struct resource_pool *pool = dc->res_pool; 2050 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2051 int mpcc_id = 0; 2052 2053 if (pool && res_ctx) { 2054 if (acquire) { 2055 /*find pipe idx for the given stream*/ 2056 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2057 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2058 found_pipe_idx = true; 2059 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2060 break; 2061 } 2062 } 2063 } else 2064 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2065 2066 if (found_pipe_idx) { 2067 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2068 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2069 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2070 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2071 } 2072 } 2073 return ret; 2074 } 2075 2076 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2077 { 2078 int i; 2079 struct pipe_ctx *pipe; 2080 2081 for (i = 0; i < MAX_PIPES; i++) { 2082 pipe = &context->res_ctx.pipe_ctx[i]; 2083 2084 // Don't check flip pending on phantom pipes 2085 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2086 continue; 2087 2088 /* Must set to false to start with, due to OR in update function */ 2089 pipe->plane_state->status.is_flip_pending = false; 2090 dc->hwss.update_pending_status(pipe); 2091 if (pipe->plane_state->status.is_flip_pending) 2092 return true; 2093 } 2094 return false; 2095 } 2096 2097 /* Perform updates here which need to be deferred until next vupdate 2098 * 2099 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2100 * but forcing lut memory to shutdown state is immediate. This causes 2101 * single frame corruption as lut gets disabled mid-frame unless shutdown 2102 * is deferred until after entering bypass. 2103 */ 2104 static void process_deferred_updates(struct dc *dc) 2105 { 2106 int i = 0; 2107 2108 if (dc->debug.enable_mem_low_power.bits.cm) { 2109 ASSERT(dc->dcn_ip->max_num_dpp); 2110 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2111 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2112 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2113 } 2114 } 2115 2116 void dc_post_update_surfaces_to_stream(struct dc *dc) 2117 { 2118 int i; 2119 struct dc_state *context = dc->current_state; 2120 2121 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2122 return; 2123 2124 post_surface_trace(dc); 2125 2126 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 2127 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2128 else 2129 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2130 2131 if (is_flip_pending_in_pipes(dc, context)) 2132 return; 2133 2134 for (i = 0; i < dc->res_pool->pipe_count; i++) 2135 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2136 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2137 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2138 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2139 } 2140 2141 process_deferred_updates(dc); 2142 2143 dc->hwss.optimize_bandwidth(dc, context); 2144 2145 if (dc->debug.enable_double_buffered_dsc_pg_support) 2146 dc->hwss.update_dsc_pg(dc, context, true); 2147 2148 dc->optimized_required = false; 2149 dc->wm_optimized_required = false; 2150 } 2151 2152 static void init_state(struct dc *dc, struct dc_state *context) 2153 { 2154 /* Each context must have their own instance of VBA and in order to 2155 * initialize and obtain IP and SOC the base DML instance from DC is 2156 * initially copied into every context 2157 */ 2158 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2159 } 2160 2161 struct dc_state *dc_create_state(struct dc *dc) 2162 { 2163 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2164 GFP_KERNEL); 2165 2166 if (!context) 2167 return NULL; 2168 2169 init_state(dc, context); 2170 2171 kref_init(&context->refcount); 2172 2173 return context; 2174 } 2175 2176 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2177 { 2178 int i, j; 2179 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2180 2181 if (!new_ctx) 2182 return NULL; 2183 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2184 2185 for (i = 0; i < MAX_PIPES; i++) { 2186 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2187 2188 if (cur_pipe->top_pipe) 2189 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2190 2191 if (cur_pipe->bottom_pipe) 2192 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2193 2194 if (cur_pipe->prev_odm_pipe) 2195 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2196 2197 if (cur_pipe->next_odm_pipe) 2198 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2199 2200 } 2201 2202 for (i = 0; i < new_ctx->stream_count; i++) { 2203 dc_stream_retain(new_ctx->streams[i]); 2204 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2205 dc_plane_state_retain( 2206 new_ctx->stream_status[i].plane_states[j]); 2207 } 2208 2209 kref_init(&new_ctx->refcount); 2210 2211 return new_ctx; 2212 } 2213 2214 void dc_retain_state(struct dc_state *context) 2215 { 2216 kref_get(&context->refcount); 2217 } 2218 2219 static void dc_state_free(struct kref *kref) 2220 { 2221 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2222 dc_resource_state_destruct(context); 2223 kvfree(context); 2224 } 2225 2226 void dc_release_state(struct dc_state *context) 2227 { 2228 kref_put(&context->refcount, dc_state_free); 2229 } 2230 2231 bool dc_set_generic_gpio_for_stereo(bool enable, 2232 struct gpio_service *gpio_service) 2233 { 2234 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2235 struct gpio_pin_info pin_info; 2236 struct gpio *generic; 2237 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2238 GFP_KERNEL); 2239 2240 if (!config) 2241 return false; 2242 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2243 2244 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2245 kfree(config); 2246 return false; 2247 } else { 2248 generic = dal_gpio_service_create_generic_mux( 2249 gpio_service, 2250 pin_info.offset, 2251 pin_info.mask); 2252 } 2253 2254 if (!generic) { 2255 kfree(config); 2256 return false; 2257 } 2258 2259 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2260 2261 config->enable_output_from_mux = enable; 2262 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2263 2264 if (gpio_result == GPIO_RESULT_OK) 2265 gpio_result = dal_mux_setup_config(generic, config); 2266 2267 if (gpio_result == GPIO_RESULT_OK) { 2268 dal_gpio_close(generic); 2269 dal_gpio_destroy_generic_mux(&generic); 2270 kfree(config); 2271 return true; 2272 } else { 2273 dal_gpio_close(generic); 2274 dal_gpio_destroy_generic_mux(&generic); 2275 kfree(config); 2276 return false; 2277 } 2278 } 2279 2280 static bool is_surface_in_context( 2281 const struct dc_state *context, 2282 const struct dc_plane_state *plane_state) 2283 { 2284 int j; 2285 2286 for (j = 0; j < MAX_PIPES; j++) { 2287 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2288 2289 if (plane_state == pipe_ctx->plane_state) { 2290 return true; 2291 } 2292 } 2293 2294 return false; 2295 } 2296 2297 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2298 { 2299 union surface_update_flags *update_flags = &u->surface->update_flags; 2300 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2301 2302 if (!u->plane_info) 2303 return UPDATE_TYPE_FAST; 2304 2305 if (u->plane_info->color_space != u->surface->color_space) { 2306 update_flags->bits.color_space_change = 1; 2307 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2308 } 2309 2310 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2311 update_flags->bits.horizontal_mirror_change = 1; 2312 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2313 } 2314 2315 if (u->plane_info->rotation != u->surface->rotation) { 2316 update_flags->bits.rotation_change = 1; 2317 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2318 } 2319 2320 if (u->plane_info->format != u->surface->format) { 2321 update_flags->bits.pixel_format_change = 1; 2322 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2323 } 2324 2325 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2326 update_flags->bits.stereo_format_change = 1; 2327 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2328 } 2329 2330 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2331 update_flags->bits.per_pixel_alpha_change = 1; 2332 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2333 } 2334 2335 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2336 update_flags->bits.global_alpha_change = 1; 2337 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2338 } 2339 2340 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2341 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2342 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2343 /* During DCC on/off, stutter period is calculated before 2344 * DCC has fully transitioned. This results in incorrect 2345 * stutter period calculation. Triggering a full update will 2346 * recalculate stutter period. 2347 */ 2348 update_flags->bits.dcc_change = 1; 2349 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2350 } 2351 2352 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2353 resource_pixel_format_to_bpp(u->surface->format)) { 2354 /* different bytes per element will require full bandwidth 2355 * and DML calculation 2356 */ 2357 update_flags->bits.bpp_change = 1; 2358 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2359 } 2360 2361 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2362 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2363 update_flags->bits.plane_size_change = 1; 2364 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2365 } 2366 2367 2368 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2369 sizeof(union dc_tiling_info)) != 0) { 2370 update_flags->bits.swizzle_change = 1; 2371 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2372 2373 /* todo: below are HW dependent, we should add a hook to 2374 * DCE/N resource and validated there. 2375 */ 2376 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2377 /* swizzled mode requires RQ to be setup properly, 2378 * thus need to run DML to calculate RQ settings 2379 */ 2380 update_flags->bits.bandwidth_change = 1; 2381 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2382 } 2383 } 2384 2385 /* This should be UPDATE_TYPE_FAST if nothing has changed. */ 2386 return update_type; 2387 } 2388 2389 static enum surface_update_type get_scaling_info_update_type( 2390 const struct dc_surface_update *u) 2391 { 2392 union surface_update_flags *update_flags = &u->surface->update_flags; 2393 2394 if (!u->scaling_info) 2395 return UPDATE_TYPE_FAST; 2396 2397 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2398 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2399 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2400 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2401 || u->scaling_info->scaling_quality.integer_scaling != 2402 u->surface->scaling_quality.integer_scaling 2403 ) { 2404 update_flags->bits.scaling_change = 1; 2405 2406 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2407 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2408 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2409 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2410 /* Making dst rect smaller requires a bandwidth change */ 2411 update_flags->bits.bandwidth_change = 1; 2412 } 2413 2414 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2415 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2416 2417 update_flags->bits.scaling_change = 1; 2418 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2419 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2420 /* Making src rect bigger requires a bandwidth change */ 2421 update_flags->bits.clock_change = 1; 2422 } 2423 2424 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2425 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2426 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2427 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2428 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2429 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2430 update_flags->bits.position_change = 1; 2431 2432 if (update_flags->bits.clock_change 2433 || update_flags->bits.bandwidth_change 2434 || update_flags->bits.scaling_change) 2435 return UPDATE_TYPE_FULL; 2436 2437 if (update_flags->bits.position_change) 2438 return UPDATE_TYPE_MED; 2439 2440 return UPDATE_TYPE_FAST; 2441 } 2442 2443 static enum surface_update_type det_surface_update(const struct dc *dc, 2444 const struct dc_surface_update *u) 2445 { 2446 const struct dc_state *context = dc->current_state; 2447 enum surface_update_type type; 2448 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2449 union surface_update_flags *update_flags = &u->surface->update_flags; 2450 2451 if (u->flip_addr) 2452 update_flags->bits.addr_update = 1; 2453 2454 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2455 update_flags->raw = 0xFFFFFFFF; 2456 return UPDATE_TYPE_FULL; 2457 } 2458 2459 update_flags->raw = 0; // Reset all flags 2460 2461 type = get_plane_info_update_type(u); 2462 elevate_update_type(&overall_type, type); 2463 2464 type = get_scaling_info_update_type(u); 2465 elevate_update_type(&overall_type, type); 2466 2467 if (u->flip_addr) { 2468 update_flags->bits.addr_update = 1; 2469 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2470 update_flags->bits.tmz_changed = 1; 2471 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2472 } 2473 } 2474 if (u->in_transfer_func) 2475 update_flags->bits.in_transfer_func_change = 1; 2476 2477 if (u->input_csc_color_matrix) 2478 update_flags->bits.input_csc_change = 1; 2479 2480 if (u->coeff_reduction_factor) 2481 update_flags->bits.coeff_reduction_change = 1; 2482 2483 if (u->gamut_remap_matrix) 2484 update_flags->bits.gamut_remap_change = 1; 2485 2486 if (u->gamma) { 2487 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2488 2489 if (u->plane_info) 2490 format = u->plane_info->format; 2491 else if (u->surface) 2492 format = u->surface->format; 2493 2494 if (dce_use_lut(format)) 2495 update_flags->bits.gamma_change = 1; 2496 } 2497 2498 if (u->lut3d_func || u->func_shaper) 2499 update_flags->bits.lut_3d = 1; 2500 2501 if (u->hdr_mult.value) 2502 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2503 update_flags->bits.hdr_mult = 1; 2504 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2505 } 2506 2507 if (update_flags->bits.in_transfer_func_change) { 2508 type = UPDATE_TYPE_MED; 2509 elevate_update_type(&overall_type, type); 2510 } 2511 2512 if (update_flags->bits.input_csc_change 2513 || update_flags->bits.coeff_reduction_change 2514 || update_flags->bits.lut_3d 2515 || update_flags->bits.gamma_change 2516 || update_flags->bits.gamut_remap_change) { 2517 type = UPDATE_TYPE_FULL; 2518 elevate_update_type(&overall_type, type); 2519 } 2520 2521 return overall_type; 2522 } 2523 2524 static enum surface_update_type check_update_surfaces_for_stream( 2525 struct dc *dc, 2526 struct dc_surface_update *updates, 2527 int surface_count, 2528 struct dc_stream_update *stream_update, 2529 const struct dc_stream_status *stream_status) 2530 { 2531 int i; 2532 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2533 2534 if (dc->idle_optimizations_allowed) 2535 overall_type = UPDATE_TYPE_FULL; 2536 2537 if (stream_status == NULL || stream_status->plane_count != surface_count) 2538 overall_type = UPDATE_TYPE_FULL; 2539 2540 if (stream_update && stream_update->pending_test_pattern) { 2541 overall_type = UPDATE_TYPE_FULL; 2542 } 2543 2544 /* some stream updates require passive update */ 2545 if (stream_update) { 2546 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2547 2548 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2549 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2550 stream_update->integer_scaling_update) 2551 su_flags->bits.scaling = 1; 2552 2553 if (stream_update->out_transfer_func) 2554 su_flags->bits.out_tf = 1; 2555 2556 if (stream_update->abm_level) 2557 su_flags->bits.abm_level = 1; 2558 2559 if (stream_update->dpms_off) 2560 su_flags->bits.dpms_off = 1; 2561 2562 if (stream_update->gamut_remap) 2563 su_flags->bits.gamut_remap = 1; 2564 2565 if (stream_update->wb_update) 2566 su_flags->bits.wb_update = 1; 2567 2568 if (stream_update->dsc_config) 2569 su_flags->bits.dsc_changed = 1; 2570 2571 if (stream_update->mst_bw_update) 2572 su_flags->bits.mst_bw = 1; 2573 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) 2574 su_flags->bits.crtc_timing_adjust = 1; 2575 2576 if (su_flags->raw != 0) 2577 overall_type = UPDATE_TYPE_FULL; 2578 2579 if (stream_update->output_csc_transform || stream_update->output_color_space) 2580 su_flags->bits.out_csc = 1; 2581 } 2582 2583 for (i = 0 ; i < surface_count; i++) { 2584 enum surface_update_type type = 2585 det_surface_update(dc, &updates[i]); 2586 2587 elevate_update_type(&overall_type, type); 2588 } 2589 2590 return overall_type; 2591 } 2592 2593 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) 2594 { 2595 int view_height, view_width, clip_x, clip_y, clip_width, clip_height; 2596 2597 view_height = src.height; 2598 view_width = src.width; 2599 2600 clip_x = clip_rect.x; 2601 clip_y = clip_rect.y; 2602 2603 clip_width = clip_rect.width; 2604 clip_height = clip_rect.height; 2605 2606 /* check for centered video accounting for off by 1 scaling truncation */ 2607 if ((view_height - clip_y - clip_height <= clip_y + 1) && 2608 (view_width - clip_x - clip_width <= clip_x + 1) && 2609 (view_height - clip_y - clip_height >= clip_y - 1) && 2610 (view_width - clip_x - clip_width >= clip_x - 1)) { 2611 2612 /* when OS scales up/down to letter box, it may end up 2613 * with few blank pixels on the border due to truncating. 2614 * Add offset margin to account for this 2615 */ 2616 if (clip_x <= 4 || clip_y <= 4) 2617 return true; 2618 } 2619 2620 return false; 2621 } 2622 2623 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, 2624 struct dc_surface_update *srf_updates, int surface_count, 2625 enum surface_update_type update_type) 2626 { 2627 enum surface_update_type new_update_type = update_type; 2628 int i, j; 2629 struct pipe_ctx *pipe = NULL; 2630 struct dc_stream_state *stream; 2631 2632 /* Check that we are in windowed MPO with ODM 2633 * - look for MPO pipe by scanning pipes for first pipe matching 2634 * surface that has moved ( position change ) 2635 * - MPO pipe will have top pipe 2636 * - check that top pipe has ODM pointer 2637 */ 2638 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { 2639 for (i = 0; i < surface_count; i++) { 2640 if (srf_updates[i].surface && srf_updates[i].scaling_info 2641 && srf_updates[i].surface->update_flags.bits.position_change) { 2642 2643 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2644 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { 2645 pipe = &dc->current_state->res_ctx.pipe_ctx[j]; 2646 stream = pipe->stream; 2647 break; 2648 } 2649 } 2650 2651 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream 2652 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { 2653 struct rect old_clip_rect, new_clip_rect; 2654 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; 2655 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; 2656 2657 old_clip_rect = srf_updates[i].surface->clip_rect; 2658 new_clip_rect = srf_updates[i].scaling_info->clip_rect; 2659 2660 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2661 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2662 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; 2663 2664 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2665 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2666 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; 2667 2668 if (old_clip_rect_left && new_clip_rect_middle) 2669 new_update_type = UPDATE_TYPE_FULL; 2670 else if (old_clip_rect_middle && new_clip_rect_right) 2671 new_update_type = UPDATE_TYPE_FULL; 2672 else if (old_clip_rect_right && new_clip_rect_middle) 2673 new_update_type = UPDATE_TYPE_FULL; 2674 else if (old_clip_rect_middle && new_clip_rect_left) 2675 new_update_type = UPDATE_TYPE_FULL; 2676 } 2677 } 2678 } 2679 } 2680 return new_update_type; 2681 } 2682 2683 /* 2684 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2685 * 2686 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2687 */ 2688 enum surface_update_type dc_check_update_surfaces_for_stream( 2689 struct dc *dc, 2690 struct dc_surface_update *updates, 2691 int surface_count, 2692 struct dc_stream_update *stream_update, 2693 const struct dc_stream_status *stream_status) 2694 { 2695 int i; 2696 enum surface_update_type type; 2697 2698 if (stream_update) 2699 stream_update->stream->update_flags.raw = 0; 2700 for (i = 0; i < surface_count; i++) 2701 updates[i].surface->update_flags.raw = 0; 2702 2703 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2704 if (type == UPDATE_TYPE_FULL) { 2705 if (stream_update) { 2706 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2707 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2708 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2709 } 2710 for (i = 0; i < surface_count; i++) 2711 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2712 } 2713 2714 if (type == UPDATE_TYPE_MED) 2715 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, 2716 updates, surface_count, type); 2717 2718 if (type == UPDATE_TYPE_FAST) { 2719 // If there's an available clock comparator, we use that. 2720 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2721 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2722 dc->optimized_required = true; 2723 // Else we fallback to mem compare. 2724 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2725 dc->optimized_required = true; 2726 } 2727 2728 dc->optimized_required |= dc->wm_optimized_required; 2729 } 2730 2731 return type; 2732 } 2733 2734 static struct dc_stream_status *stream_get_status( 2735 struct dc_state *ctx, 2736 struct dc_stream_state *stream) 2737 { 2738 uint8_t i; 2739 2740 for (i = 0; i < ctx->stream_count; i++) { 2741 if (stream == ctx->streams[i]) { 2742 return &ctx->stream_status[i]; 2743 } 2744 } 2745 2746 return NULL; 2747 } 2748 2749 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2750 2751 static void copy_surface_update_to_plane( 2752 struct dc_plane_state *surface, 2753 struct dc_surface_update *srf_update) 2754 { 2755 if (srf_update->flip_addr) { 2756 surface->address = srf_update->flip_addr->address; 2757 surface->flip_immediate = 2758 srf_update->flip_addr->flip_immediate; 2759 surface->time.time_elapsed_in_us[surface->time.index] = 2760 srf_update->flip_addr->flip_timestamp_in_us - 2761 surface->time.prev_update_time_in_us; 2762 surface->time.prev_update_time_in_us = 2763 srf_update->flip_addr->flip_timestamp_in_us; 2764 surface->time.index++; 2765 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2766 surface->time.index = 0; 2767 2768 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2769 } 2770 2771 if (srf_update->scaling_info) { 2772 surface->scaling_quality = 2773 srf_update->scaling_info->scaling_quality; 2774 surface->dst_rect = 2775 srf_update->scaling_info->dst_rect; 2776 surface->src_rect = 2777 srf_update->scaling_info->src_rect; 2778 surface->clip_rect = 2779 srf_update->scaling_info->clip_rect; 2780 } 2781 2782 if (srf_update->plane_info) { 2783 surface->color_space = 2784 srf_update->plane_info->color_space; 2785 surface->format = 2786 srf_update->plane_info->format; 2787 surface->plane_size = 2788 srf_update->plane_info->plane_size; 2789 surface->rotation = 2790 srf_update->plane_info->rotation; 2791 surface->horizontal_mirror = 2792 srf_update->plane_info->horizontal_mirror; 2793 surface->stereo_format = 2794 srf_update->plane_info->stereo_format; 2795 surface->tiling_info = 2796 srf_update->plane_info->tiling_info; 2797 surface->visible = 2798 srf_update->plane_info->visible; 2799 surface->per_pixel_alpha = 2800 srf_update->plane_info->per_pixel_alpha; 2801 surface->global_alpha = 2802 srf_update->plane_info->global_alpha; 2803 surface->global_alpha_value = 2804 srf_update->plane_info->global_alpha_value; 2805 surface->dcc = 2806 srf_update->plane_info->dcc; 2807 surface->layer_index = 2808 srf_update->plane_info->layer_index; 2809 } 2810 2811 if (srf_update->gamma && 2812 (surface->gamma_correction != 2813 srf_update->gamma)) { 2814 memcpy(&surface->gamma_correction->entries, 2815 &srf_update->gamma->entries, 2816 sizeof(struct dc_gamma_entries)); 2817 surface->gamma_correction->is_identity = 2818 srf_update->gamma->is_identity; 2819 surface->gamma_correction->num_entries = 2820 srf_update->gamma->num_entries; 2821 surface->gamma_correction->type = 2822 srf_update->gamma->type; 2823 } 2824 2825 if (srf_update->in_transfer_func && 2826 (surface->in_transfer_func != 2827 srf_update->in_transfer_func)) { 2828 surface->in_transfer_func->sdr_ref_white_level = 2829 srf_update->in_transfer_func->sdr_ref_white_level; 2830 surface->in_transfer_func->tf = 2831 srf_update->in_transfer_func->tf; 2832 surface->in_transfer_func->type = 2833 srf_update->in_transfer_func->type; 2834 memcpy(&surface->in_transfer_func->tf_pts, 2835 &srf_update->in_transfer_func->tf_pts, 2836 sizeof(struct dc_transfer_func_distributed_points)); 2837 } 2838 2839 if (srf_update->func_shaper && 2840 (surface->in_shaper_func != 2841 srf_update->func_shaper)) 2842 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2843 sizeof(*surface->in_shaper_func)); 2844 2845 if (srf_update->lut3d_func && 2846 (surface->lut3d_func != 2847 srf_update->lut3d_func)) 2848 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2849 sizeof(*surface->lut3d_func)); 2850 2851 if (srf_update->hdr_mult.value) 2852 surface->hdr_mult = 2853 srf_update->hdr_mult; 2854 2855 if (srf_update->blend_tf && 2856 (surface->blend_tf != 2857 srf_update->blend_tf)) 2858 memcpy(surface->blend_tf, srf_update->blend_tf, 2859 sizeof(*surface->blend_tf)); 2860 2861 if (srf_update->input_csc_color_matrix) 2862 surface->input_csc_color_matrix = 2863 *srf_update->input_csc_color_matrix; 2864 2865 if (srf_update->coeff_reduction_factor) 2866 surface->coeff_reduction_factor = 2867 *srf_update->coeff_reduction_factor; 2868 2869 if (srf_update->gamut_remap_matrix) 2870 surface->gamut_remap_matrix = 2871 *srf_update->gamut_remap_matrix; 2872 } 2873 2874 static void copy_stream_update_to_stream(struct dc *dc, 2875 struct dc_state *context, 2876 struct dc_stream_state *stream, 2877 struct dc_stream_update *update) 2878 { 2879 struct dc_context *dc_ctx = dc->ctx; 2880 2881 if (update == NULL || stream == NULL) 2882 return; 2883 2884 if (update->src.height && update->src.width) 2885 stream->src = update->src; 2886 2887 if (update->dst.height && update->dst.width) 2888 stream->dst = update->dst; 2889 2890 if (update->out_transfer_func && 2891 stream->out_transfer_func != update->out_transfer_func) { 2892 stream->out_transfer_func->sdr_ref_white_level = 2893 update->out_transfer_func->sdr_ref_white_level; 2894 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2895 stream->out_transfer_func->type = 2896 update->out_transfer_func->type; 2897 memcpy(&stream->out_transfer_func->tf_pts, 2898 &update->out_transfer_func->tf_pts, 2899 sizeof(struct dc_transfer_func_distributed_points)); 2900 } 2901 2902 if (update->hdr_static_metadata) 2903 stream->hdr_static_metadata = *update->hdr_static_metadata; 2904 2905 if (update->abm_level) 2906 stream->abm_level = *update->abm_level; 2907 2908 if (update->periodic_interrupt) 2909 stream->periodic_interrupt = *update->periodic_interrupt; 2910 2911 if (update->gamut_remap) 2912 stream->gamut_remap_matrix = *update->gamut_remap; 2913 2914 /* Note: this being updated after mode set is currently not a use case 2915 * however if it arises OCSC would need to be reprogrammed at the 2916 * minimum 2917 */ 2918 if (update->output_color_space) 2919 stream->output_color_space = *update->output_color_space; 2920 2921 if (update->output_csc_transform) 2922 stream->csc_color_matrix = *update->output_csc_transform; 2923 2924 if (update->vrr_infopacket) 2925 stream->vrr_infopacket = *update->vrr_infopacket; 2926 2927 if (update->allow_freesync) 2928 stream->allow_freesync = *update->allow_freesync; 2929 2930 if (update->vrr_active_variable) 2931 stream->vrr_active_variable = *update->vrr_active_variable; 2932 2933 if (update->crtc_timing_adjust) 2934 stream->adjust = *update->crtc_timing_adjust; 2935 2936 if (update->dpms_off) 2937 stream->dpms_off = *update->dpms_off; 2938 2939 if (update->hfvsif_infopacket) 2940 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2941 2942 if (update->vtem_infopacket) 2943 stream->vtem_infopacket = *update->vtem_infopacket; 2944 2945 if (update->vsc_infopacket) 2946 stream->vsc_infopacket = *update->vsc_infopacket; 2947 2948 if (update->vsp_infopacket) 2949 stream->vsp_infopacket = *update->vsp_infopacket; 2950 2951 if (update->dither_option) 2952 stream->dither_option = *update->dither_option; 2953 2954 if (update->pending_test_pattern) 2955 stream->test_pattern = *update->pending_test_pattern; 2956 /* update current stream with writeback info */ 2957 if (update->wb_update) { 2958 int i; 2959 2960 stream->num_wb_info = update->wb_update->num_wb_info; 2961 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2962 for (i = 0; i < stream->num_wb_info; i++) 2963 stream->writeback_info[i] = 2964 update->wb_update->writeback_info[i]; 2965 } 2966 if (update->dsc_config) { 2967 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2968 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2969 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2970 update->dsc_config->num_slices_v != 0); 2971 2972 /* Use temporarry context for validating new DSC config */ 2973 struct dc_state *dsc_validate_context = dc_create_state(dc); 2974 2975 if (dsc_validate_context) { 2976 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2977 2978 stream->timing.dsc_cfg = *update->dsc_config; 2979 stream->timing.flags.DSC = enable_dsc; 2980 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2981 stream->timing.dsc_cfg = old_dsc_cfg; 2982 stream->timing.flags.DSC = old_dsc_enabled; 2983 update->dsc_config = NULL; 2984 } 2985 2986 dc_release_state(dsc_validate_context); 2987 } else { 2988 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2989 update->dsc_config = NULL; 2990 } 2991 } 2992 } 2993 2994 static bool update_planes_and_stream_state(struct dc *dc, 2995 struct dc_surface_update *srf_updates, int surface_count, 2996 struct dc_stream_state *stream, 2997 struct dc_stream_update *stream_update, 2998 enum surface_update_type *new_update_type, 2999 struct dc_state **new_context) 3000 { 3001 struct dc_state *context; 3002 int i, j; 3003 enum surface_update_type update_type; 3004 const struct dc_stream_status *stream_status; 3005 struct dc_context *dc_ctx = dc->ctx; 3006 3007 stream_status = dc_stream_get_status(stream); 3008 3009 if (!stream_status) { 3010 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3011 ASSERT(false); 3012 3013 return false; /* Cannot commit surface to stream that is not committed */ 3014 } 3015 3016 context = dc->current_state; 3017 3018 update_type = dc_check_update_surfaces_for_stream( 3019 dc, srf_updates, surface_count, stream_update, stream_status); 3020 3021 /* update current stream with the new updates */ 3022 copy_stream_update_to_stream(dc, context, stream, stream_update); 3023 3024 /* do not perform surface update if surface has invalid dimensions 3025 * (all zero) and no scaling_info is provided 3026 */ 3027 if (surface_count > 0) { 3028 for (i = 0; i < surface_count; i++) { 3029 if ((srf_updates[i].surface->src_rect.width == 0 || 3030 srf_updates[i].surface->src_rect.height == 0 || 3031 srf_updates[i].surface->dst_rect.width == 0 || 3032 srf_updates[i].surface->dst_rect.height == 0) && 3033 (!srf_updates[i].scaling_info || 3034 srf_updates[i].scaling_info->src_rect.width == 0 || 3035 srf_updates[i].scaling_info->src_rect.height == 0 || 3036 srf_updates[i].scaling_info->dst_rect.width == 0 || 3037 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3038 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3039 return false; 3040 } 3041 } 3042 } 3043 3044 if (update_type >= update_surface_trace_level) 3045 update_surface_trace(dc, srf_updates, surface_count); 3046 3047 if (update_type >= UPDATE_TYPE_FULL) { 3048 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3049 3050 for (i = 0; i < surface_count; i++) 3051 new_planes[i] = srf_updates[i].surface; 3052 3053 /* initialize scratch memory for building context */ 3054 context = dc_create_state(dc); 3055 if (context == NULL) { 3056 DC_ERROR("Failed to allocate new validate context!\n"); 3057 return false; 3058 } 3059 3060 dc_resource_state_copy_construct( 3061 dc->current_state, context); 3062 3063 /* For each full update, remove all existing phantom pipes first. 3064 * Ensures that we have enough pipes for newly added MPO planes 3065 */ 3066 if (dc->res_pool->funcs->remove_phantom_pipes) 3067 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3068 3069 /*remove old surfaces from context */ 3070 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3071 3072 BREAK_TO_DEBUGGER(); 3073 goto fail; 3074 } 3075 3076 /* add surface to context */ 3077 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3078 3079 BREAK_TO_DEBUGGER(); 3080 goto fail; 3081 } 3082 } 3083 3084 /* save update parameters into surface */ 3085 for (i = 0; i < surface_count; i++) { 3086 struct dc_plane_state *surface = srf_updates[i].surface; 3087 3088 copy_surface_update_to_plane(surface, &srf_updates[i]); 3089 3090 if (update_type >= UPDATE_TYPE_MED) { 3091 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3092 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3093 3094 if (pipe_ctx->plane_state != surface) 3095 continue; 3096 3097 resource_build_scaling_params(pipe_ctx); 3098 } 3099 } 3100 } 3101 3102 if (update_type == UPDATE_TYPE_FULL) { 3103 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3104 /* For phantom pipes we remove and create a new set of phantom pipes 3105 * for each full update (because we don't know if we'll need phantom 3106 * pipes until after the first round of validation). However, if validation 3107 * fails we need to keep the existing phantom pipes (because we don't update 3108 * the dc->current_state). 3109 * 3110 * The phantom stream/plane refcount is decremented for validation because 3111 * we assume it'll be removed (the free comes when the dc_state is freed), 3112 * but if validation fails we have to increment back the refcount so it's 3113 * consistent. 3114 */ 3115 if (dc->res_pool->funcs->retain_phantom_pipes) 3116 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); 3117 BREAK_TO_DEBUGGER(); 3118 goto fail; 3119 } 3120 } 3121 3122 *new_context = context; 3123 *new_update_type = update_type; 3124 3125 return true; 3126 3127 fail: 3128 dc_release_state(context); 3129 3130 return false; 3131 3132 } 3133 3134 static void commit_planes_do_stream_update(struct dc *dc, 3135 struct dc_stream_state *stream, 3136 struct dc_stream_update *stream_update, 3137 enum surface_update_type update_type, 3138 struct dc_state *context) 3139 { 3140 int j; 3141 3142 // Stream updates 3143 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3144 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3145 3146 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 3147 3148 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3149 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3150 3151 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3152 stream_update->vrr_infopacket || 3153 stream_update->vsc_infopacket || 3154 stream_update->vsp_infopacket || 3155 stream_update->hfvsif_infopacket || 3156 stream_update->vtem_infopacket) { 3157 resource_build_info_frame(pipe_ctx); 3158 dc->hwss.update_info_frame(pipe_ctx); 3159 3160 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3161 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3162 } 3163 3164 if (stream_update->hdr_static_metadata && 3165 stream->use_dynamic_meta && 3166 dc->hwss.set_dmdata_attributes && 3167 pipe_ctx->stream->dmdata_address.quad_part != 0) 3168 dc->hwss.set_dmdata_attributes(pipe_ctx); 3169 3170 if (stream_update->gamut_remap) 3171 dc_stream_set_gamut_remap(dc, stream); 3172 3173 if (stream_update->output_csc_transform) 3174 dc_stream_program_csc_matrix(dc, stream); 3175 3176 if (stream_update->dither_option) { 3177 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3178 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3179 &pipe_ctx->stream->bit_depth_params); 3180 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3181 &stream->bit_depth_params, 3182 &stream->clamping); 3183 while (odm_pipe) { 3184 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3185 &stream->bit_depth_params, 3186 &stream->clamping); 3187 odm_pipe = odm_pipe->next_odm_pipe; 3188 } 3189 } 3190 3191 3192 /* Full fe update*/ 3193 if (update_type == UPDATE_TYPE_FAST) 3194 continue; 3195 3196 if (stream_update->dsc_config) 3197 dp_update_dsc_config(pipe_ctx); 3198 3199 if (stream_update->mst_bw_update) { 3200 if (stream_update->mst_bw_update->is_increase) 3201 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3202 else 3203 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3204 } 3205 3206 if (stream_update->pending_test_pattern) { 3207 dc_link_dp_set_test_pattern(stream->link, 3208 stream->test_pattern.type, 3209 stream->test_pattern.color_space, 3210 stream->test_pattern.p_link_settings, 3211 stream->test_pattern.p_custom_pattern, 3212 stream->test_pattern.cust_pattern_size); 3213 } 3214 3215 if (stream_update->dpms_off) { 3216 if (*stream_update->dpms_off) { 3217 core_link_disable_stream(pipe_ctx); 3218 /* for dpms, keep acquired resources*/ 3219 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3220 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3221 3222 dc->optimized_required = true; 3223 3224 } else { 3225 if (get_seamless_boot_stream_count(context) == 0) 3226 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3227 core_link_enable_stream(dc->current_state, pipe_ctx); 3228 } 3229 } 3230 3231 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3232 bool should_program_abm = true; 3233 3234 // if otg funcs defined check if blanked before programming 3235 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3236 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3237 should_program_abm = false; 3238 3239 if (should_program_abm) { 3240 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3241 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3242 } else { 3243 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3244 pipe_ctx->stream_res.abm, stream->abm_level); 3245 } 3246 } 3247 } 3248 } 3249 } 3250 } 3251 3252 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3253 { 3254 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3255 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3256 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3257 return true; 3258 3259 return false; 3260 } 3261 3262 void dc_dmub_update_dirty_rect(struct dc *dc, 3263 int surface_count, 3264 struct dc_stream_state *stream, 3265 struct dc_surface_update *srf_updates, 3266 struct dc_state *context) 3267 { 3268 union dmub_rb_cmd cmd; 3269 struct dc_context *dc_ctx = dc->ctx; 3270 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3271 unsigned int i, j; 3272 unsigned int panel_inst = 0; 3273 3274 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3275 return; 3276 3277 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3278 return; 3279 3280 memset(&cmd, 0x0, sizeof(cmd)); 3281 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3282 cmd.update_dirty_rect.header.sub_type = 0; 3283 cmd.update_dirty_rect.header.payload_bytes = 3284 sizeof(cmd.update_dirty_rect) - 3285 sizeof(cmd.update_dirty_rect.header); 3286 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3287 for (i = 0; i < surface_count; i++) { 3288 struct dc_plane_state *plane_state = srf_updates[i].surface; 3289 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3290 3291 if (!srf_updates[i].surface || !flip_addr) 3292 continue; 3293 /* Do not send in immediate flip mode */ 3294 if (srf_updates[i].surface->flip_immediate) 3295 continue; 3296 3297 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3298 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3299 sizeof(flip_addr->dirty_rects)); 3300 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3301 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3302 3303 if (pipe_ctx->stream != stream) 3304 continue; 3305 if (pipe_ctx->plane_state != plane_state) 3306 continue; 3307 3308 update_dirty_rect->panel_inst = panel_inst; 3309 update_dirty_rect->pipe_idx = j; 3310 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); 3311 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); 3312 } 3313 } 3314 } 3315 3316 static void commit_planes_for_stream(struct dc *dc, 3317 struct dc_surface_update *srf_updates, 3318 int surface_count, 3319 struct dc_stream_state *stream, 3320 struct dc_stream_update *stream_update, 3321 enum surface_update_type update_type, 3322 struct dc_state *context) 3323 { 3324 int i, j; 3325 struct pipe_ctx *top_pipe_to_program = NULL; 3326 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3327 bool subvp_prev_use = false; 3328 3329 // Once we apply the new subvp context to hardware it won't be in the 3330 // dc->current_state anymore, so we have to cache it before we apply 3331 // the new SubVP context 3332 subvp_prev_use = false; 3333 3334 3335 dc_z10_restore(dc); 3336 3337 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 3338 /* Optimize seamless boot flag keeps clocks and watermarks high until 3339 * first flip. After first flip, optimization is required to lower 3340 * bandwidth. Important to note that it is expected UEFI will 3341 * only light up a single display on POST, therefore we only expect 3342 * one stream with seamless boot flag set. 3343 */ 3344 if (stream->apply_seamless_boot_optimization) { 3345 stream->apply_seamless_boot_optimization = false; 3346 3347 if (get_seamless_boot_stream_count(context) == 0) 3348 dc->optimized_required = true; 3349 } 3350 } 3351 3352 if (update_type == UPDATE_TYPE_FULL) { 3353 dc_allow_idle_optimizations(dc, false); 3354 3355 if (get_seamless_boot_stream_count(context) == 0) 3356 dc->hwss.prepare_bandwidth(dc, context); 3357 3358 if (dc->debug.enable_double_buffered_dsc_pg_support) 3359 dc->hwss.update_dsc_pg(dc, context, false); 3360 3361 context_clock_trace(dc, context); 3362 } 3363 3364 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3365 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3366 3367 if (!pipe_ctx->top_pipe && 3368 !pipe_ctx->prev_odm_pipe && 3369 pipe_ctx->stream && 3370 pipe_ctx->stream == stream) { 3371 top_pipe_to_program = pipe_ctx; 3372 } 3373 } 3374 3375 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3376 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3377 3378 // Check old context for SubVP 3379 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3380 if (subvp_prev_use) 3381 break; 3382 } 3383 3384 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3385 struct pipe_ctx *mpcc_pipe; 3386 struct pipe_ctx *odm_pipe; 3387 3388 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3389 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3390 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3391 } 3392 3393 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3394 if (top_pipe_to_program && 3395 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3396 if (should_use_dmub_lock(stream->link)) { 3397 union dmub_hw_lock_flags hw_locks = { 0 }; 3398 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3399 3400 hw_locks.bits.lock_dig = 1; 3401 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3402 3403 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3404 true, 3405 &hw_locks, 3406 &inst_flags); 3407 } else 3408 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3409 top_pipe_to_program->stream_res.tg); 3410 } 3411 3412 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3413 if (dc->hwss.subvp_pipe_control_lock) 3414 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3415 dc->hwss.interdependent_update_lock(dc, context, true); 3416 3417 } else { 3418 if (dc->hwss.subvp_pipe_control_lock) 3419 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3420 /* Lock the top pipe while updating plane addrs, since freesync requires 3421 * plane addr update event triggers to be synchronized. 3422 * top_pipe_to_program is expected to never be NULL 3423 */ 3424 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3425 } 3426 3427 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3428 3429 if (update_type != UPDATE_TYPE_FAST) { 3430 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3431 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3432 3433 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3434 subvp_prev_use) { 3435 // If old context or new context has phantom pipes, apply 3436 // the phantom timings now. We can't change the phantom 3437 // pipe configuration safely without driver acquiring 3438 // the DMCUB lock first. 3439 dc->hwss.apply_ctx_to_hw(dc, context); 3440 break; 3441 } 3442 } 3443 } 3444 3445 // Stream updates 3446 if (stream_update) 3447 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3448 3449 if (surface_count == 0) { 3450 /* 3451 * In case of turning off screen, no need to program front end a second time. 3452 * just return after program blank. 3453 */ 3454 if (dc->hwss.apply_ctx_for_surface) 3455 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3456 if (dc->hwss.program_front_end_for_ctx) 3457 dc->hwss.program_front_end_for_ctx(dc, context); 3458 3459 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3460 dc->hwss.interdependent_update_lock(dc, context, false); 3461 } else { 3462 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3463 } 3464 dc->hwss.post_unlock_program_front_end(dc, context); 3465 3466 if (update_type != UPDATE_TYPE_FAST) 3467 if (dc->hwss.commit_subvp_config) 3468 dc->hwss.commit_subvp_config(dc, context); 3469 3470 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3471 * move the SubVP lock to after the phantom pipes have been setup 3472 */ 3473 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3474 if (dc->hwss.subvp_pipe_control_lock) 3475 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3476 } else { 3477 if (dc->hwss.subvp_pipe_control_lock) 3478 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3479 } 3480 3481 return; 3482 } 3483 3484 if (update_type != UPDATE_TYPE_FAST) { 3485 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3486 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3487 3488 if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && 3489 pipe_ctx->stream && pipe_ctx->plane_state) { 3490 /* Only update visual confirm for SUBVP here. 3491 * The bar appears on all pipes, so we need to update the bar on all displays, 3492 * so the information doesn't get stale. 3493 */ 3494 struct mpcc_blnd_cfg blnd_cfg = { 0 }; 3495 3496 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, 3497 pipe_ctx->plane_res.hubp->inst); 3498 } 3499 } 3500 } 3501 3502 if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 3503 for (i = 0; i < surface_count; i++) { 3504 struct dc_plane_state *plane_state = srf_updates[i].surface; 3505 /*set logical flag for lock/unlock use*/ 3506 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3507 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3508 if (!pipe_ctx->plane_state) 3509 continue; 3510 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3511 continue; 3512 pipe_ctx->plane_state->triplebuffer_flips = false; 3513 if (update_type == UPDATE_TYPE_FAST && 3514 dc->hwss.program_triplebuffer != NULL && 3515 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3516 /*triple buffer for VUpdate only*/ 3517 pipe_ctx->plane_state->triplebuffer_flips = true; 3518 } 3519 } 3520 if (update_type == UPDATE_TYPE_FULL) { 3521 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3522 plane_state->flip_immediate = false; 3523 } 3524 } 3525 } 3526 3527 // Update Type FULL, Surface updates 3528 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3529 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3530 3531 if (!pipe_ctx->top_pipe && 3532 !pipe_ctx->prev_odm_pipe && 3533 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3534 struct dc_stream_status *stream_status = NULL; 3535 3536 if (!pipe_ctx->plane_state) 3537 continue; 3538 3539 /* Full fe update*/ 3540 if (update_type == UPDATE_TYPE_FAST) 3541 continue; 3542 3543 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3544 3545 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3546 /*turn off triple buffer for full update*/ 3547 dc->hwss.program_triplebuffer( 3548 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3549 } 3550 stream_status = 3551 stream_get_status(context, pipe_ctx->stream); 3552 3553 if (dc->hwss.apply_ctx_for_surface) 3554 dc->hwss.apply_ctx_for_surface( 3555 dc, pipe_ctx->stream, stream_status->plane_count, context); 3556 } 3557 } 3558 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3559 dc->hwss.program_front_end_for_ctx(dc, context); 3560 if (dc->debug.validate_dml_output) { 3561 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3562 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3563 if (cur_pipe->stream == NULL) 3564 continue; 3565 3566 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3567 cur_pipe->plane_res.hubp, dc->ctx, 3568 &context->res_ctx.pipe_ctx[i].rq_regs, 3569 &context->res_ctx.pipe_ctx[i].dlg_regs, 3570 &context->res_ctx.pipe_ctx[i].ttu_regs); 3571 } 3572 } 3573 } 3574 3575 // Update Type FAST, Surface updates 3576 if (update_type == UPDATE_TYPE_FAST) { 3577 if (dc->hwss.set_flip_control_gsl) 3578 for (i = 0; i < surface_count; i++) { 3579 struct dc_plane_state *plane_state = srf_updates[i].surface; 3580 3581 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3582 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3583 3584 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3585 continue; 3586 3587 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3588 continue; 3589 3590 // GSL has to be used for flip immediate 3591 dc->hwss.set_flip_control_gsl(pipe_ctx, 3592 pipe_ctx->plane_state->flip_immediate); 3593 } 3594 } 3595 3596 /* Perform requested Updates */ 3597 for (i = 0; i < surface_count; i++) { 3598 struct dc_plane_state *plane_state = srf_updates[i].surface; 3599 3600 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3601 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3602 3603 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3604 continue; 3605 3606 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3607 continue; 3608 3609 /*program triple buffer after lock based on flip type*/ 3610 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3611 /*only enable triplebuffer for fast_update*/ 3612 dc->hwss.program_triplebuffer( 3613 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3614 } 3615 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3616 dc->hwss.update_plane_addr(dc, pipe_ctx); 3617 } 3618 } 3619 } 3620 3621 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3622 dc->hwss.interdependent_update_lock(dc, context, false); 3623 } else { 3624 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3625 } 3626 3627 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3628 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3629 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3630 top_pipe_to_program->stream_res.tg, 3631 CRTC_STATE_VACTIVE); 3632 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3633 top_pipe_to_program->stream_res.tg, 3634 CRTC_STATE_VBLANK); 3635 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3636 top_pipe_to_program->stream_res.tg, 3637 CRTC_STATE_VACTIVE); 3638 3639 if (should_use_dmub_lock(stream->link)) { 3640 union dmub_hw_lock_flags hw_locks = { 0 }; 3641 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3642 3643 hw_locks.bits.lock_dig = 1; 3644 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3645 3646 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3647 false, 3648 &hw_locks, 3649 &inst_flags); 3650 } else 3651 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3652 top_pipe_to_program->stream_res.tg); 3653 } 3654 3655 /* For phantom pipe OTG enable, it has to be done after any previous pipe 3656 * that was in use has already been programmed at gotten its double buffer 3657 * update for "disable". 3658 */ 3659 if (update_type != UPDATE_TYPE_FAST) { 3660 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3661 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3662 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3663 3664 /* If an active, non-phantom pipe is being transitioned into a phantom 3665 * pipe, wait for the double buffer update to complete first before we do 3666 * ANY phantom pipe programming. 3667 */ 3668 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && 3669 old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 3670 old_pipe->stream_res.tg->funcs->wait_for_state( 3671 old_pipe->stream_res.tg, 3672 CRTC_STATE_VBLANK); 3673 old_pipe->stream_res.tg->funcs->wait_for_state( 3674 old_pipe->stream_res.tg, 3675 CRTC_STATE_VACTIVE); 3676 } 3677 } 3678 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3679 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3680 3681 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3682 subvp_prev_use) { 3683 // If old context or new context has phantom pipes, apply 3684 // the phantom timings now. We can't change the phantom 3685 // pipe configuration safely without driver acquiring 3686 // the DMCUB lock first. 3687 dc->hwss.apply_ctx_to_hw(dc, context); 3688 break; 3689 } 3690 } 3691 } 3692 3693 if (update_type != UPDATE_TYPE_FAST) 3694 dc->hwss.post_unlock_program_front_end(dc, context); 3695 if (update_type != UPDATE_TYPE_FAST) 3696 if (dc->hwss.commit_subvp_config) 3697 dc->hwss.commit_subvp_config(dc, context); 3698 3699 if (update_type != UPDATE_TYPE_FAST) 3700 if (dc->hwss.commit_subvp_config) 3701 dc->hwss.commit_subvp_config(dc, context); 3702 3703 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3704 * move the SubVP lock to after the phantom pipes have been setup 3705 */ 3706 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3707 if (dc->hwss.subvp_pipe_control_lock) 3708 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3709 } else { 3710 if (dc->hwss.subvp_pipe_control_lock) 3711 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3712 } 3713 3714 // Fire manual trigger only when bottom plane is flipped 3715 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3716 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3717 3718 if (!pipe_ctx->plane_state) 3719 continue; 3720 3721 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 3722 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 3723 !pipe_ctx->plane_state->update_flags.bits.addr_update || 3724 pipe_ctx->plane_state->skip_manual_trigger) 3725 continue; 3726 3727 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 3728 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 3729 } 3730 } 3731 3732 /** 3733 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change 3734 * 3735 * @dc: Used to get the current state status 3736 * @stream: Target stream, which we want to remove the attached planes 3737 * @surface_count: Number of surface update 3738 * @is_plane_addition: [in] Fill out with true if it is a plane addition case 3739 * 3740 * DCN32x and newer support a feature named Dynamic ODM which can conflict with 3741 * the MPO if used simultaneously in some specific configurations (e.g., 3742 * 4k@144). This function checks if the incoming context requires applying a 3743 * transition state with unnecessary pipe splitting and ODM disabled to 3744 * circumvent our hardware limitations to prevent this edge case. If the OPP 3745 * associated with an MPCC might change due to plane additions, this function 3746 * returns true. 3747 * 3748 * Return: 3749 * Return true if OPP and MPCC might change, otherwise, return false. 3750 */ 3751 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, 3752 struct dc_stream_state *stream, 3753 int surface_count, 3754 bool *is_plane_addition) 3755 { 3756 3757 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 3758 bool force_minimal_pipe_splitting = false; 3759 bool subvp_active = false; 3760 uint32_t i; 3761 3762 *is_plane_addition = false; 3763 3764 if (cur_stream_status && 3765 dc->current_state->stream_count > 0 && 3766 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { 3767 /* determine if minimal transition is required due to MPC*/ 3768 if (surface_count > 0) { 3769 if (cur_stream_status->plane_count > surface_count) { 3770 force_minimal_pipe_splitting = true; 3771 } else if (cur_stream_status->plane_count < surface_count) { 3772 force_minimal_pipe_splitting = true; 3773 *is_plane_addition = true; 3774 } 3775 } 3776 } 3777 3778 if (cur_stream_status && 3779 dc->current_state->stream_count == 1 && 3780 dc->debug.enable_single_display_2to1_odm_policy) { 3781 /* determine if minimal transition is required due to dynamic ODM*/ 3782 if (surface_count > 0) { 3783 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { 3784 force_minimal_pipe_splitting = true; 3785 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { 3786 force_minimal_pipe_splitting = true; 3787 *is_plane_addition = true; 3788 } 3789 } 3790 } 3791 3792 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3793 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3794 3795 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 3796 subvp_active = true; 3797 break; 3798 } 3799 } 3800 3801 /* For SubVP when adding or removing planes we need to add a minimal transition 3802 * (even when disabling all planes). Whenever disabling a phantom pipe, we 3803 * must use the minimal transition path to disable the pipe correctly. 3804 * 3805 * We want to use the minimal transition whenever subvp is active, not only if 3806 * a plane is being added / removed from a subvp stream (MPO plane can be added 3807 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through 3808 * a min transition to disable subvp. 3809 */ 3810 if (cur_stream_status && subvp_active) { 3811 /* determine if minimal transition is required due to SubVP*/ 3812 if (cur_stream_status->plane_count > surface_count) { 3813 force_minimal_pipe_splitting = true; 3814 } else if (cur_stream_status->plane_count < surface_count) { 3815 force_minimal_pipe_splitting = true; 3816 *is_plane_addition = true; 3817 } 3818 } 3819 3820 return force_minimal_pipe_splitting; 3821 } 3822 3823 /** 3824 * commit_minimal_transition_state - Create a transition pipe split state 3825 * 3826 * @dc: Used to get the current state status 3827 * @transition_base_context: New transition state 3828 * 3829 * In some specific configurations, such as pipe split on multi-display with 3830 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 3831 * programming when moving to new planes. To mitigate those types of problems, 3832 * this function adds a transition state that minimizes pipe usage before 3833 * programming the new configuration. When adding a new plane, the current 3834 * state requires the least pipes, so it is applied without splitting. When 3835 * removing a plane, the new state requires the least pipes, so it is applied 3836 * without splitting. 3837 * 3838 * Return: 3839 * Return false if something is wrong in the transition state. 3840 */ 3841 static bool commit_minimal_transition_state(struct dc *dc, 3842 struct dc_state *transition_base_context) 3843 { 3844 struct dc_state *transition_context = dc_create_state(dc); 3845 enum pipe_split_policy tmp_mpc_policy; 3846 bool temp_dynamic_odm_policy; 3847 bool temp_subvp_policy; 3848 enum dc_status ret = DC_ERROR_UNEXPECTED; 3849 unsigned int i, j; 3850 unsigned int pipe_in_use = 0; 3851 bool subvp_in_use = false; 3852 3853 if (!transition_context) 3854 return false; 3855 /* Setup: 3856 * Store the current ODM and MPC config in some temp variables to be 3857 * restored after we commit the transition state. 3858 */ 3859 3860 /* check current pipes in use*/ 3861 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3862 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 3863 3864 if (pipe->plane_state) 3865 pipe_in_use++; 3866 } 3867 3868 /* If SubVP is enabled and we are adding or removing planes from any main subvp 3869 * pipe, we must use the minimal transition. 3870 */ 3871 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3872 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3873 3874 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3875 subvp_in_use = true; 3876 break; 3877 } 3878 } 3879 3880 /* When the OS add a new surface if we have been used all of pipes with odm combine 3881 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. 3882 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need 3883 * call it again. Otherwise return true to skip. 3884 * 3885 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially 3886 * enter/exit MPO when DCN still have enough resources. 3887 */ 3888 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) { 3889 dc_release_state(transition_context); 3890 return true; 3891 } 3892 3893 if (!dc->config.is_vmin_only_asic) { 3894 tmp_mpc_policy = dc->debug.pipe_split_policy; 3895 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 3896 } 3897 3898 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 3899 dc->debug.enable_single_display_2to1_odm_policy = false; 3900 3901 temp_subvp_policy = dc->debug.force_disable_subvp; 3902 dc->debug.force_disable_subvp = true; 3903 3904 dc_resource_state_copy_construct(transition_base_context, transition_context); 3905 3906 /* commit minimal state */ 3907 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { 3908 for (i = 0; i < transition_context->stream_count; i++) { 3909 struct dc_stream_status *stream_status = &transition_context->stream_status[i]; 3910 3911 for (j = 0; j < stream_status->plane_count; j++) { 3912 struct dc_plane_state *plane_state = stream_status->plane_states[j]; 3913 3914 /* force vsync flip when reconfiguring pipes to prevent underflow 3915 * and corruption 3916 */ 3917 plane_state->flip_immediate = false; 3918 } 3919 } 3920 3921 ret = dc_commit_state_no_check(dc, transition_context); 3922 } 3923 3924 /* always release as dc_commit_state_no_check retains in good case */ 3925 dc_release_state(transition_context); 3926 3927 /* TearDown: 3928 * Restore original configuration for ODM and MPO. 3929 */ 3930 if (!dc->config.is_vmin_only_asic) 3931 dc->debug.pipe_split_policy = tmp_mpc_policy; 3932 3933 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy; 3934 dc->debug.force_disable_subvp = temp_subvp_policy; 3935 3936 if (ret != DC_OK) { 3937 /* this should never happen */ 3938 BREAK_TO_DEBUGGER(); 3939 return false; 3940 } 3941 3942 /* force full surface update */ 3943 for (i = 0; i < dc->current_state->stream_count; i++) { 3944 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 3945 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 3946 } 3947 } 3948 3949 return true; 3950 } 3951 3952 bool dc_update_planes_and_stream(struct dc *dc, 3953 struct dc_surface_update *srf_updates, int surface_count, 3954 struct dc_stream_state *stream, 3955 struct dc_stream_update *stream_update) 3956 { 3957 struct dc_state *context; 3958 enum surface_update_type update_type; 3959 int i; 3960 struct mall_temp_config mall_temp_config; 3961 3962 /* In cases where MPO and split or ODM are used transitions can 3963 * cause underflow. Apply stream configuration with minimal pipe 3964 * split first to avoid unsupported transitions for active pipes. 3965 */ 3966 bool force_minimal_pipe_splitting; 3967 bool is_plane_addition; 3968 3969 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 3970 dc, 3971 stream, 3972 surface_count, 3973 &is_plane_addition); 3974 3975 /* on plane addition, minimal state is the current one */ 3976 if (force_minimal_pipe_splitting && is_plane_addition && 3977 !commit_minimal_transition_state(dc, dc->current_state)) 3978 return false; 3979 3980 if (!update_planes_and_stream_state( 3981 dc, 3982 srf_updates, 3983 surface_count, 3984 stream, 3985 stream_update, 3986 &update_type, 3987 &context)) 3988 return false; 3989 3990 /* on plane removal, minimal state is the new one */ 3991 if (force_minimal_pipe_splitting && !is_plane_addition) { 3992 /* Since all phantom pipes are removed in full validation, 3993 * we have to save and restore the subvp/mall config when 3994 * we do a minimal transition since the flags marking the 3995 * pipe as subvp/phantom will be cleared (dc copy constructor 3996 * creates a shallow copy). 3997 */ 3998 if (dc->res_pool->funcs->save_mall_state) 3999 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); 4000 if (!commit_minimal_transition_state(dc, context)) { 4001 dc_release_state(context); 4002 return false; 4003 } 4004 if (dc->res_pool->funcs->restore_mall_state) 4005 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); 4006 4007 /* If we do a minimal transition with plane removal and the context 4008 * has subvp we also have to retain back the phantom stream / planes 4009 * since the refcount is decremented as part of the min transition 4010 * (we commit a state with no subvp, so the phantom streams / planes 4011 * had to be removed). 4012 */ 4013 if (dc->res_pool->funcs->retain_phantom_pipes) 4014 dc->res_pool->funcs->retain_phantom_pipes(dc, context); 4015 update_type = UPDATE_TYPE_FULL; 4016 } 4017 4018 commit_planes_for_stream( 4019 dc, 4020 srf_updates, 4021 surface_count, 4022 stream, 4023 stream_update, 4024 update_type, 4025 context); 4026 4027 if (dc->current_state != context) { 4028 4029 /* Since memory free requires elevated IRQL, an interrupt 4030 * request is generated by mem free. If this happens 4031 * between freeing and reassigning the context, our vsync 4032 * interrupt will call into dc and cause a memory 4033 * corruption BSOD. Hence, we first reassign the context, 4034 * then free the old context. 4035 */ 4036 4037 struct dc_state *old = dc->current_state; 4038 4039 dc->current_state = context; 4040 dc_release_state(old); 4041 4042 // clear any forced full updates 4043 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4044 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4045 4046 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4047 pipe_ctx->plane_state->force_full_update = false; 4048 } 4049 } 4050 return true; 4051 } 4052 4053 void dc_commit_updates_for_stream(struct dc *dc, 4054 struct dc_surface_update *srf_updates, 4055 int surface_count, 4056 struct dc_stream_state *stream, 4057 struct dc_stream_update *stream_update, 4058 struct dc_state *state) 4059 { 4060 const struct dc_stream_status *stream_status; 4061 enum surface_update_type update_type; 4062 struct dc_state *context; 4063 struct dc_context *dc_ctx = dc->ctx; 4064 int i, j; 4065 4066 /* TODO: Since change commit sequence can have a huge impact, 4067 * we decided to only enable it for DCN3x. However, as soon as 4068 * we get more confident about this change we'll need to enable 4069 * the new sequence for all ASICs. 4070 */ 4071 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 4072 dc_update_planes_and_stream(dc, srf_updates, 4073 surface_count, stream, 4074 stream_update); 4075 return; 4076 } 4077 4078 stream_status = dc_stream_get_status(stream); 4079 context = dc->current_state; 4080 4081 update_type = dc_check_update_surfaces_for_stream( 4082 dc, srf_updates, surface_count, stream_update, stream_status); 4083 4084 if (update_type >= update_surface_trace_level) 4085 update_surface_trace(dc, srf_updates, surface_count); 4086 4087 4088 if (update_type >= UPDATE_TYPE_FULL) { 4089 4090 /* initialize scratch memory for building context */ 4091 context = dc_create_state(dc); 4092 if (context == NULL) { 4093 DC_ERROR("Failed to allocate new validate context!\n"); 4094 return; 4095 } 4096 4097 dc_resource_state_copy_construct(state, context); 4098 4099 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4100 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 4101 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4102 4103 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 4104 new_pipe->plane_state->force_full_update = true; 4105 } 4106 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) { 4107 /* 4108 * Previous frame finished and HW is ready for optimization. 4109 * 4110 * Only relevant for DCN behavior where we can guarantee the optimization 4111 * is safe to apply - retain the legacy behavior for DCE. 4112 */ 4113 dc_post_update_surfaces_to_stream(dc); 4114 } 4115 4116 4117 for (i = 0; i < surface_count; i++) { 4118 struct dc_plane_state *surface = srf_updates[i].surface; 4119 4120 copy_surface_update_to_plane(surface, &srf_updates[i]); 4121 4122 if (update_type >= UPDATE_TYPE_MED) { 4123 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4124 struct pipe_ctx *pipe_ctx = 4125 &context->res_ctx.pipe_ctx[j]; 4126 4127 if (pipe_ctx->plane_state != surface) 4128 continue; 4129 4130 resource_build_scaling_params(pipe_ctx); 4131 } 4132 } 4133 } 4134 4135 copy_stream_update_to_stream(dc, context, stream, stream_update); 4136 4137 if (update_type >= UPDATE_TYPE_FULL) { 4138 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 4139 DC_ERROR("Mode validation failed for stream update!\n"); 4140 dc_release_state(context); 4141 return; 4142 } 4143 } 4144 4145 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 4146 4147 commit_planes_for_stream( 4148 dc, 4149 srf_updates, 4150 surface_count, 4151 stream, 4152 stream_update, 4153 update_type, 4154 context); 4155 /*update current_State*/ 4156 if (dc->current_state != context) { 4157 4158 struct dc_state *old = dc->current_state; 4159 4160 dc->current_state = context; 4161 dc_release_state(old); 4162 4163 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4164 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4165 4166 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4167 pipe_ctx->plane_state->force_full_update = false; 4168 } 4169 } 4170 4171 /* Legacy optimization path for DCE. */ 4172 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 4173 dc_post_update_surfaces_to_stream(dc); 4174 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 4175 } 4176 4177 return; 4178 4179 } 4180 4181 uint8_t dc_get_current_stream_count(struct dc *dc) 4182 { 4183 return dc->current_state->stream_count; 4184 } 4185 4186 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 4187 { 4188 if (i < dc->current_state->stream_count) 4189 return dc->current_state->streams[i]; 4190 return NULL; 4191 } 4192 4193 enum dc_irq_source dc_interrupt_to_irq_source( 4194 struct dc *dc, 4195 uint32_t src_id, 4196 uint32_t ext_id) 4197 { 4198 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 4199 } 4200 4201 /* 4202 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 4203 */ 4204 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 4205 { 4206 4207 if (dc == NULL) 4208 return false; 4209 4210 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 4211 } 4212 4213 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 4214 { 4215 dal_irq_service_ack(dc->res_pool->irqs, src); 4216 } 4217 4218 void dc_power_down_on_boot(struct dc *dc) 4219 { 4220 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 4221 dc->hwss.power_down_on_boot) 4222 dc->hwss.power_down_on_boot(dc); 4223 } 4224 4225 void dc_set_power_state( 4226 struct dc *dc, 4227 enum dc_acpi_cm_power_state power_state) 4228 { 4229 struct kref refcount; 4230 struct display_mode_lib *dml; 4231 4232 if (!dc->current_state) 4233 return; 4234 4235 switch (power_state) { 4236 case DC_ACPI_CM_POWER_STATE_D0: 4237 dc_resource_state_construct(dc, dc->current_state); 4238 4239 dc_z10_restore(dc); 4240 4241 if (dc->ctx->dmub_srv) 4242 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); 4243 4244 dc->hwss.init_hw(dc); 4245 4246 if (dc->hwss.init_sys_ctx != NULL && 4247 dc->vm_pa_config.valid) { 4248 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 4249 } 4250 4251 break; 4252 default: 4253 ASSERT(dc->current_state->stream_count == 0); 4254 /* Zero out the current context so that on resume we start with 4255 * clean state, and dc hw programming optimizations will not 4256 * cause any trouble. 4257 */ 4258 dml = kzalloc(sizeof(struct display_mode_lib), 4259 GFP_KERNEL); 4260 4261 ASSERT(dml); 4262 if (!dml) 4263 return; 4264 4265 /* Preserve refcount */ 4266 refcount = dc->current_state->refcount; 4267 /* Preserve display mode lib */ 4268 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); 4269 4270 dc_resource_state_destruct(dc->current_state); 4271 memset(dc->current_state, 0, 4272 sizeof(*dc->current_state)); 4273 4274 dc->current_state->refcount = refcount; 4275 dc->current_state->bw_ctx.dml = *dml; 4276 4277 kfree(dml); 4278 4279 break; 4280 } 4281 } 4282 4283 void dc_resume(struct dc *dc) 4284 { 4285 uint32_t i; 4286 4287 for (i = 0; i < dc->link_count; i++) 4288 core_link_resume(dc->links[i]); 4289 } 4290 4291 bool dc_is_dmcu_initialized(struct dc *dc) 4292 { 4293 struct dmcu *dmcu = dc->res_pool->dmcu; 4294 4295 if (dmcu) 4296 return dmcu->funcs->is_dmcu_initialized(dmcu); 4297 return false; 4298 } 4299 4300 bool dc_is_oem_i2c_device_present( 4301 struct dc *dc, 4302 size_t slave_address) 4303 { 4304 if (dc->res_pool->oem_device) 4305 return dce_i2c_oem_device_present( 4306 dc->res_pool, 4307 dc->res_pool->oem_device, 4308 slave_address); 4309 4310 return false; 4311 } 4312 4313 bool dc_submit_i2c( 4314 struct dc *dc, 4315 uint32_t link_index, 4316 struct i2c_command *cmd) 4317 { 4318 4319 struct dc_link *link = dc->links[link_index]; 4320 struct ddc_service *ddc = link->ddc; 4321 return dce_i2c_submit_command( 4322 dc->res_pool, 4323 ddc->ddc_pin, 4324 cmd); 4325 } 4326 4327 bool dc_submit_i2c_oem( 4328 struct dc *dc, 4329 struct i2c_command *cmd) 4330 { 4331 struct ddc_service *ddc = dc->res_pool->oem_device; 4332 if (ddc) 4333 return dce_i2c_submit_command( 4334 dc->res_pool, 4335 ddc->ddc_pin, 4336 cmd); 4337 4338 return false; 4339 } 4340 4341 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) 4342 { 4343 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { 4344 BREAK_TO_DEBUGGER(); 4345 return false; 4346 } 4347 4348 dc_sink_retain(sink); 4349 4350 dc_link->remote_sinks[dc_link->sink_count] = sink; 4351 dc_link->sink_count++; 4352 4353 return true; 4354 } 4355 4356 /* 4357 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link 4358 * 4359 * EDID length is in bytes 4360 */ 4361 struct dc_sink *dc_link_add_remote_sink( 4362 struct dc_link *link, 4363 const uint8_t *edid, 4364 int len, 4365 struct dc_sink_init_data *init_data) 4366 { 4367 struct dc_sink *dc_sink; 4368 enum dc_edid_status edid_status; 4369 4370 if (len > DC_MAX_EDID_BUFFER_SIZE) { 4371 dm_error("Max EDID buffer size breached!\n"); 4372 return NULL; 4373 } 4374 4375 if (!init_data) { 4376 BREAK_TO_DEBUGGER(); 4377 return NULL; 4378 } 4379 4380 if (!init_data->link) { 4381 BREAK_TO_DEBUGGER(); 4382 return NULL; 4383 } 4384 4385 dc_sink = dc_sink_create(init_data); 4386 4387 if (!dc_sink) 4388 return NULL; 4389 4390 memmove(dc_sink->dc_edid.raw_edid, edid, len); 4391 dc_sink->dc_edid.length = len; 4392 4393 if (!link_add_remote_sink_helper( 4394 link, 4395 dc_sink)) 4396 goto fail_add_sink; 4397 4398 edid_status = dm_helpers_parse_edid_caps( 4399 link, 4400 &dc_sink->dc_edid, 4401 &dc_sink->edid_caps); 4402 4403 /* 4404 * Treat device as no EDID device if EDID 4405 * parsing fails 4406 */ 4407 if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) { 4408 dc_sink->dc_edid.length = 0; 4409 dm_error("Bad EDID, status%d!\n", edid_status); 4410 } 4411 4412 return dc_sink; 4413 4414 fail_add_sink: 4415 dc_sink_release(dc_sink); 4416 return NULL; 4417 } 4418 4419 /* 4420 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link 4421 * 4422 * Note that this just removes the struct dc_sink - it doesn't 4423 * program hardware or alter other members of dc_link 4424 */ 4425 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) 4426 { 4427 int i; 4428 4429 if (!link->sink_count) { 4430 BREAK_TO_DEBUGGER(); 4431 return; 4432 } 4433 4434 for (i = 0; i < link->sink_count; i++) { 4435 if (link->remote_sinks[i] == sink) { 4436 dc_sink_release(sink); 4437 link->remote_sinks[i] = NULL; 4438 4439 /* shrink array to remove empty place */ 4440 while (i < link->sink_count - 1) { 4441 link->remote_sinks[i] = link->remote_sinks[i+1]; 4442 i++; 4443 } 4444 link->remote_sinks[i] = NULL; 4445 link->sink_count--; 4446 return; 4447 } 4448 } 4449 } 4450 4451 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) 4452 { 4453 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; 4454 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; 4455 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; 4456 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; 4457 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; 4458 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; 4459 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; 4460 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; 4461 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; 4462 } 4463 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 4464 { 4465 if (dc->hwss.set_clock) 4466 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 4467 return DC_ERROR_UNEXPECTED; 4468 } 4469 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 4470 { 4471 if (dc->hwss.get_clock) 4472 dc->hwss.get_clock(dc, clock_type, clock_cfg); 4473 } 4474 4475 /* enable/disable eDP PSR without specify stream for eDP */ 4476 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 4477 { 4478 int i; 4479 bool allow_active; 4480 4481 for (i = 0; i < dc->current_state->stream_count ; i++) { 4482 struct dc_link *link; 4483 struct dc_stream_state *stream = dc->current_state->streams[i]; 4484 4485 link = stream->link; 4486 if (!link) 4487 continue; 4488 4489 if (link->psr_settings.psr_feature_enabled) { 4490 if (enable && !link->psr_settings.psr_allow_active) { 4491 allow_active = true; 4492 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 4493 return false; 4494 } else if (!enable && link->psr_settings.psr_allow_active) { 4495 allow_active = false; 4496 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 4497 return false; 4498 } 4499 } 4500 } 4501 4502 return true; 4503 } 4504 4505 void dc_allow_idle_optimizations(struct dc *dc, bool allow) 4506 { 4507 if (dc->debug.disable_idle_power_optimizations) 4508 return; 4509 4510 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 4511 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 4512 return; 4513 4514 if (allow == dc->idle_optimizations_allowed) 4515 return; 4516 4517 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) 4518 dc->idle_optimizations_allowed = allow; 4519 } 4520 4521 /* set min and max memory clock to lowest and highest DPM level, respectively */ 4522 void dc_unlock_memory_clock_frequency(struct dc *dc) 4523 { 4524 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4525 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 4526 4527 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4528 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4529 } 4530 4531 /* set min memory clock to the min required for current mode, max to maxDPM */ 4532 void dc_lock_memory_clock_frequency(struct dc *dc) 4533 { 4534 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 4535 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 4536 4537 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4538 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 4539 4540 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4541 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4542 } 4543 4544 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 4545 { 4546 struct dc_state *context = dc->current_state; 4547 struct hubp *hubp; 4548 struct pipe_ctx *pipe; 4549 int i; 4550 4551 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4552 pipe = &context->res_ctx.pipe_ctx[i]; 4553 4554 if (pipe->stream != NULL) { 4555 dc->hwss.disable_pixel_data(dc, pipe, true); 4556 4557 // wait for double buffer 4558 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4559 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 4560 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4561 4562 hubp = pipe->plane_res.hubp; 4563 hubp->funcs->set_blank_regs(hubp, true); 4564 } 4565 } 4566 4567 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 4568 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 4569 4570 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4571 pipe = &context->res_ctx.pipe_ctx[i]; 4572 4573 if (pipe->stream != NULL) { 4574 dc->hwss.disable_pixel_data(dc, pipe, false); 4575 4576 hubp = pipe->plane_res.hubp; 4577 hubp->funcs->set_blank_regs(hubp, false); 4578 } 4579 } 4580 } 4581 4582 4583 /** 4584 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 4585 * @dc: pointer to dc of the dm calling this 4586 * @enable: True = transition to DC mode, false = transition back to AC mode 4587 * 4588 * Some SoCs define additional clock limits when in DC mode, DM should 4589 * invoke this function when the platform undergoes a power source transition 4590 * so DC can apply/unapply the limit. This interface may be disruptive to 4591 * the onscreen content. 4592 * 4593 * Context: Triggered by OS through DM interface, or manually by escape calls. 4594 * Need to hold a dclock when doing so. 4595 * 4596 * Return: none (void function) 4597 * 4598 */ 4599 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 4600 { 4601 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; 4602 unsigned int softMax, maxDPM, funcMin; 4603 bool p_state_change_support; 4604 4605 if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) 4606 return; 4607 4608 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 4609 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; 4610 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 4611 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 4612 4613 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 4614 if (p_state_change_support) { 4615 if (funcMin <= softMax) 4616 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 4617 // else: No-Op 4618 } else { 4619 if (funcMin <= softMax) 4620 blank_and_force_memclk(dc, true, softMax); 4621 // else: No-Op 4622 } 4623 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 4624 if (p_state_change_support) { 4625 if (funcMin <= softMax) 4626 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 4627 // else: No-Op 4628 } else { 4629 if (funcMin <= softMax) 4630 blank_and_force_memclk(dc, true, maxDPM); 4631 // else: No-Op 4632 } 4633 } 4634 dc->clk_mgr->dc_mode_softmax_enabled = enable; 4635 } 4636 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 4637 struct dc_cursor_attributes *cursor_attr) 4638 { 4639 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) 4640 return true; 4641 return false; 4642 } 4643 4644 /* cleanup on driver unload */ 4645 void dc_hardware_release(struct dc *dc) 4646 { 4647 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 4648 4649 if (dc->hwss.hardware_release) 4650 dc->hwss.hardware_release(dc); 4651 } 4652 4653 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 4654 { 4655 if (dc->current_state) 4656 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 4657 } 4658 4659 /** 4660 * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification 4661 * 4662 * @dc: [in] dc structure 4663 * 4664 * Checks whether DMUB FW supports outbox notifications, if supported DM 4665 * should register outbox interrupt prior to actually enabling interrupts 4666 * via dc_enable_dmub_outbox 4667 * 4668 * Return: 4669 * True if DMUB FW supports outbox notifications, False otherwise 4670 */ 4671 bool dc_is_dmub_outbox_supported(struct dc *dc) 4672 { 4673 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 4674 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 4675 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 4676 !dc->debug.dpia_debug.bits.disable_dpia) 4677 return true; 4678 4679 if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 && 4680 !dc->debug.dpia_debug.bits.disable_dpia) 4681 return true; 4682 4683 /* dmub aux needs dmub notifications to be enabled */ 4684 return dc->debug.enable_dmub_aux_for_legacy_ddc; 4685 } 4686 4687 /** 4688 * dc_enable_dmub_notifications - Check if dmub fw supports outbox 4689 * 4690 * @dc: [in] dc structure 4691 * 4692 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 4693 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This 4694 * API shall be removed after switching. 4695 * 4696 * Return: 4697 * True if DMUB FW supports outbox notifications, False otherwise 4698 */ 4699 bool dc_enable_dmub_notifications(struct dc *dc) 4700 { 4701 return dc_is_dmub_outbox_supported(dc); 4702 } 4703 4704 /** 4705 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 4706 * 4707 * dc: [in] dc structure 4708 * 4709 * Enables DMUB unsolicited notifications to x86 via outbox. 4710 */ 4711 void dc_enable_dmub_outbox(struct dc *dc) 4712 { 4713 struct dc_context *dc_ctx = dc->ctx; 4714 4715 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 4716 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 4717 } 4718 4719 /** 4720 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 4721 * Sets port index appropriately for legacy DDC 4722 * @dc: dc structure 4723 * @link_index: link index 4724 * @payload: aux payload 4725 * 4726 * Returns: True if successful, False if failure 4727 */ 4728 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 4729 uint32_t link_index, 4730 struct aux_payload *payload) 4731 { 4732 uint8_t action; 4733 union dmub_rb_cmd cmd = {0}; 4734 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4735 4736 ASSERT(payload->length <= 16); 4737 4738 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 4739 cmd.dp_aux_access.header.payload_bytes = 0; 4740 /* For dpia, ddc_pin is set to NULL */ 4741 if (!dc->links[link_index]->ddc->ddc_pin) 4742 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 4743 else 4744 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 4745 4746 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 4747 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 4748 cmd.dp_aux_access.aux_control.timeout = 0; 4749 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 4750 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 4751 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 4752 4753 /* set aux action */ 4754 if (payload->i2c_over_aux) { 4755 if (payload->write) { 4756 if (payload->mot) 4757 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 4758 else 4759 action = DP_AUX_REQ_ACTION_I2C_WRITE; 4760 } else { 4761 if (payload->mot) 4762 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 4763 else 4764 action = DP_AUX_REQ_ACTION_I2C_READ; 4765 } 4766 } else { 4767 if (payload->write) 4768 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 4769 else 4770 action = DP_AUX_REQ_ACTION_DPCD_READ; 4771 } 4772 4773 cmd.dp_aux_access.aux_control.dpaux.action = action; 4774 4775 if (payload->length && payload->write) { 4776 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 4777 payload->data, 4778 payload->length 4779 ); 4780 } 4781 4782 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4783 dc_dmub_srv_cmd_execute(dmub_srv); 4784 dc_dmub_srv_wait_idle(dmub_srv); 4785 4786 return true; 4787 } 4788 4789 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 4790 uint8_t dpia_port_index) 4791 { 4792 uint8_t index, link_index = 0xFF; 4793 4794 for (index = 0; index < dc->link_count; index++) { 4795 /* ddc_hw_inst has dpia port index for dpia links 4796 * and ddc instance for legacy links 4797 */ 4798 if (!dc->links[index]->ddc->ddc_pin) { 4799 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 4800 link_index = index; 4801 break; 4802 } 4803 } 4804 } 4805 ASSERT(link_index != 0xFF); 4806 return link_index; 4807 } 4808 4809 /** 4810 * dc_process_dmub_set_config_async - Submits set_config command 4811 * 4812 * @dc: [in] dc structure 4813 * @link_index: [in] link_index: link index 4814 * @payload: [in] aux payload 4815 * @notify: [out] set_config immediate reply 4816 * 4817 * Submits set_config command to dmub via inbox message. 4818 * 4819 * Return: 4820 * True if successful, False if failure 4821 */ 4822 bool dc_process_dmub_set_config_async(struct dc *dc, 4823 uint32_t link_index, 4824 struct set_config_cmd_payload *payload, 4825 struct dmub_notification *notify) 4826 { 4827 union dmub_rb_cmd cmd = {0}; 4828 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4829 bool is_cmd_complete = true; 4830 4831 /* prepare SET_CONFIG command */ 4832 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 4833 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 4834 4835 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 4836 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 4837 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 4838 4839 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { 4840 /* command is not processed by dmub */ 4841 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 4842 return is_cmd_complete; 4843 } 4844 4845 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 4846 if (cmd.set_config_access.header.ret_status == 1) 4847 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 4848 else 4849 /* cmd pending, will receive notification via outbox */ 4850 is_cmd_complete = false; 4851 4852 return is_cmd_complete; 4853 } 4854 4855 /** 4856 * dc_process_dmub_set_mst_slots - Submits MST solt allocation 4857 * 4858 * @dc: [in] dc structure 4859 * @link_index: [in] link index 4860 * @mst_alloc_slots: [in] mst slots to be allotted 4861 * @mst_slots_in_use: [out] mst slots in use returned in failure case 4862 * 4863 * Submits mst slot allocation command to dmub via inbox message 4864 * 4865 * Return: 4866 * DC_OK if successful, DC_ERROR if failure 4867 */ 4868 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 4869 uint32_t link_index, 4870 uint8_t mst_alloc_slots, 4871 uint8_t *mst_slots_in_use) 4872 { 4873 union dmub_rb_cmd cmd = {0}; 4874 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4875 4876 /* prepare MST_ALLOC_SLOTS command */ 4877 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 4878 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 4879 4880 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 4881 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 4882 4883 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) 4884 /* command is not processed by dmub */ 4885 return DC_ERROR_UNEXPECTED; 4886 4887 /* command processed by dmub, if ret_status is 1 */ 4888 if (cmd.set_config_access.header.ret_status != 1) 4889 /* command processing error */ 4890 return DC_ERROR_UNEXPECTED; 4891 4892 /* command processed and we have a status of 2, mst not enabled in dpia */ 4893 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 4894 return DC_FAIL_UNSUPPORTED_1; 4895 4896 /* previously configured mst alloc and used slots did not match */ 4897 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 4898 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 4899 return DC_NOT_SUPPORTED; 4900 } 4901 4902 return DC_OK; 4903 } 4904 4905 /** 4906 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption 4907 * 4908 * @dc [in]: dc structure 4909 * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable 4910 * 4911 * Submits dpia hpd int enable command to dmub via inbox message 4912 */ 4913 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 4914 uint32_t hpd_int_enable) 4915 { 4916 union dmub_rb_cmd cmd = {0}; 4917 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4918 4919 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 4920 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 4921 4922 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4923 dc_dmub_srv_cmd_execute(dmub_srv); 4924 dc_dmub_srv_wait_idle(dmub_srv); 4925 4926 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 4927 } 4928 4929 /** 4930 * dc_disable_accelerated_mode - disable accelerated mode 4931 * @dc: dc structure 4932 */ 4933 void dc_disable_accelerated_mode(struct dc *dc) 4934 { 4935 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 4936 } 4937 4938 4939 /** 4940 * dc_notify_vsync_int_state - notifies vsync enable/disable state 4941 * @dc: dc structure 4942 * @stream: stream where vsync int state changed 4943 * @enable: whether vsync is enabled or disabled 4944 * 4945 * Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM 4946 * interrupts after steady state is reached. 4947 */ 4948 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 4949 { 4950 int i; 4951 int edp_num; 4952 struct pipe_ctx *pipe = NULL; 4953 struct dc_link *link = stream->sink->link; 4954 struct dc_link *edp_links[MAX_NUM_EDP]; 4955 4956 4957 if (link->psr_settings.psr_feature_enabled) 4958 return; 4959 4960 /*find primary pipe associated with stream*/ 4961 for (i = 0; i < MAX_PIPES; i++) { 4962 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4963 4964 if (pipe->stream == stream && pipe->stream_res.tg) 4965 break; 4966 } 4967 4968 if (i == MAX_PIPES) { 4969 ASSERT(0); 4970 return; 4971 } 4972 4973 get_edp_links(dc, edp_links, &edp_num); 4974 4975 /* Determine panel inst */ 4976 for (i = 0; i < edp_num; i++) { 4977 if (edp_links[i] == link) 4978 break; 4979 } 4980 4981 if (i == edp_num) { 4982 return; 4983 } 4984 4985 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 4986 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 4987 } 4988 4989 /** 4990 * dc_extended_blank_supported 0 Decide whether extended blank is supported 4991 * 4992 * @dc: [in] Current DC state 4993 * 4994 * Extended blank is a freesync optimization feature to be enabled in the 4995 * future. During the extra vblank period gained from freesync, we have the 4996 * ability to enter z9/z10. 4997 * 4998 * Return: 4999 * Indicate whether extended blank is supported (true or false) 5000 */ 5001 bool dc_extended_blank_supported(struct dc *dc) 5002 { 5003 return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 5004 && dc->caps.zstate_support && dc->caps.is_apu; 5005 } 5006