/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
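/*
 * A minimal sketch of the typical dm-side lifecycle around the structs
 * described above, using public entry points defined later in this file.
 * The init_data/streams/stream_count variables are illustrative, not part
 * of this file:
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	if (dc) {
 *		dc_hardware_init(dc);
 *		// ... build dc_stream_state objects from detected sinks ...
 *		dc_commit_streams(dc, streams, stream_count);
 *		// ... on teardown ...
 *		dc_destroy(&dc);
 *	}
 */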
/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}
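/*
 * Usage sketch for elevate_update_type() (values illustrative): callers fold
 * per-surface checks into one overall update type, which can only grow in
 * severity:
 *
 *	enum surface_update_type overall = UPDATE_TYPE_FAST;
 *
 *	elevate_update_type(&overall, UPDATE_TYPE_MED);
 *	elevate_update_type(&overall, UPDATE_TYPE_FAST);
 *	// overall is now UPDATE_TYPE_MED
 */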
static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = link_create(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 *	%false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] last VTOTAL used by the DRR logic
 *
 * Return: %true if the pipe context is found and there is an associated
 *	timing_generator for the DC;
 *	%false if the pipe context is not found or there is no
 *	timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @crc_window: CRC window (x/y start/end) information
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *	once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *	%true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
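/*
 * Usage sketch pairing the two CRC helpers above (a NULL crc_window captures
 * the full frame; the log call is illustrative):
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true)) {
 *		// ... wait at least one frame ...
 *		if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *			DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 *	}
 */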
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;

	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
		struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->enable_crtc)
					tg->funcs->enable_crtc(tg);
			}
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/* Public functions */

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe should be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
static bool streams_changed(struct dc *dc,
			    struct dc_stream_state *streams[],
			    uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
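/*
 * Usage sketch for dc_validate_boot_timing(): a dm would typically compare
 * the mode it is about to set against what VBIOS/GOP already programmed and,
 * on a match, request a flicker-free (seamless boot) takeover. The variable
 * names are illustrative:
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */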
static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}

/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies the given context to the hardware and copies it into the current
 * context. It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}

/**
 * dc_commit_streams - Commit current stream state
 *
 * @dc: DC object with the commit state to be configured in the hardware
 * @streams: Array with a list of stream state
 * @stream_count: Total number of streams
 *
 * Function responsible for committing a stream change to the hardware.
 *
 * Return:
 * Return DC_OK if everything works as expected, otherwise, return a dc_status
 * code.
 */
enum dc_status dc_commit_streams(struct dc *dc,
				 struct dc_stream_state *streams[],
				 uint8_t stream_count)
{
	int i, j;
	struct dc_state *context;
	enum dc_status res = DC_OK;
	struct dc_validation_set set[MAX_STREAMS] = {0};

	if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
		return res;

	if (!streams_changed(dc, streams, stream_count))
		return res;

	DC_LOG_DC("%s: %d streams\n", __func__, stream_count);

	for (i = 0; i < stream_count; i++) {
		struct dc_stream_state *stream = streams[i];
		struct dc_stream_status *status = dc_stream_get_status(stream);

		dc_stream_log(dc, stream);

		set[i].stream = stream;

		if (status) {
			set[i].plane_count = status->plane_count;
			for (j = 0; j < status->plane_count; j++)
				set[i].plane_states[j] = status->plane_states[j];
		}
	}

	context = dc_create_state(dc);
	if (!context)
		goto context_alloc_fail;

	dc_resource_state_copy_construct_current(dc, context);

	res = dc_validate_with_context(dc, set, stream_count, context, false);
	if (res != DC_OK) {
		BREAK_TO_DEBUGGER();
		goto fail;
	}

	res = dc_commit_state_no_check(dc, context);

	for (i = 0; i < stream_count; i++) {
		for (j = 0; j < context->stream_count; j++) {
			if (streams[i]->stream_id == context->streams[j]->stream_id)
				streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;

			if (dc_is_embedded_signal(streams[i]->signal)) {
				struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);

				if (dc->hwss.is_abm_supported)
					status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
				else
					status->is_abm_supported = true;
			}
		}
	}

fail:
	dc_release_state(context);

context_alloc_fail:

	DC_LOG_DC("%s Finished.\n", __func__);

	return res;
}
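/*
 * Usage sketch for dc_commit_streams() (the streams array is illustrative;
 * dm builds it from the connectors that should remain lit):
 *
 *	struct dc_stream_state *streams[MAX_STREAMS];
 *	uint8_t count = 0;
 *
 *	streams[count++] = stream;	// one enabled stream
 *	if (dc_commit_streams(dc, streams, count) != DC_OK)
 *		DC_LOG_ERROR("commit failed\n");
 */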
1928 * 1929 * Return: 1930 * Return DC_OK if everything works as expected; otherwise, return a dc_status 1931 * error code. 1932 */ 1933 enum dc_status dc_commit_streams(struct dc *dc, 1934 struct dc_stream_state *streams[], 1935 uint8_t stream_count) 1936 { 1937 int i, j; 1938 struct dc_state *context; 1939 enum dc_status res = DC_OK; 1940 struct dc_validation_set set[MAX_STREAMS] = {0}; 1941 1942 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 1943 return res; 1944 1945 if (!streams_changed(dc, streams, stream_count)) 1946 return res; 1947 1948 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 1949 1950 for (i = 0; i < stream_count; i++) { 1951 struct dc_stream_state *stream = streams[i]; 1952 struct dc_stream_status *status = dc_stream_get_status(stream); 1953 1954 dc_stream_log(dc, stream); 1955 1956 set[i].stream = stream; 1957 1958 if (status) { 1959 set[i].plane_count = status->plane_count; 1960 for (j = 0; j < status->plane_count; j++) 1961 set[i].plane_states[j] = status->plane_states[j]; 1962 } 1963 } 1964 1965 context = dc_create_state(dc); 1966 if (!context) 1967 goto context_alloc_fail; 1968 1969 dc_resource_state_copy_construct_current(dc, context); 1970 1971 res = dc_validate_with_context(dc, set, stream_count, context, false); 1972 if (res != DC_OK) { 1973 BREAK_TO_DEBUGGER(); 1974 goto fail; 1975 } 1976 1977 res = dc_commit_state_no_check(dc, context); 1978 1979 for (i = 0; i < stream_count; i++) { 1980 for (j = 0; j < context->stream_count; j++) { 1981 if (streams[i]->stream_id == context->streams[j]->stream_id) 1982 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 1983 1984 if (dc_is_embedded_signal(streams[i]->signal)) { 1985 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 1986 1987 if (dc->hwss.is_abm_supported) 1988 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 1989 else 1990 status->is_abm_supported = true; 1991 } 1992 } 1993 } 1994 1995 fail: 1996 dc_release_state(context); 1997 1998 context_alloc_fail: 1999 2000 DC_LOG_DC("%s Finished.\n", __func__); 2001 2002 return res; 2003 } 2004 2005 /* TODO: When the transition to the new commit sequence is done, remove this 2006 * function in favor of dc_commit_streams. */ 2007 bool dc_commit_state(struct dc *dc, struct dc_state *context) 2008 { 2009 enum dc_status result = DC_ERROR_UNEXPECTED; 2010 int i; 2011 2012 /* TODO: Since changing the commit sequence can have a huge impact, 2013 * we decided to only enable it for DCN3x. However, as soon as 2014 * we get more confident about this change, we'll need to enable 2015 * the new sequence for all ASICs. */ 2016 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 2017 result = dc_commit_streams(dc, context->streams, context->stream_count); 2018 return result == DC_OK; 2019 } 2020 2021 if (!streams_changed(dc, context->streams, context->stream_count)) { 2022 return true; 2023 } 2024 2025 DC_LOG_DC("%s: %d streams\n", 2026 __func__, context->stream_count); 2027 2028 for (i = 0; i < context->stream_count; i++) { 2029 struct dc_stream_state *stream = context->streams[i]; 2030 2031 dc_stream_log(dc, stream); 2032 } 2033 2034 /* 2035 * Previous validation was performed with fast_validation = true and 2036 * the full DML state required for hardware programming was skipped. 2037 * 2038 * Re-validate here to calculate these parameters / watermarks.
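 * (Hence the call below passes false for its fast-validation argument,
 * requesting the full calculation.)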
*/ 2040 result = dc_validate_global_state(dc, context, false); 2041 if (result != DC_OK) { 2042 DC_LOG_ERROR("DC commit global validation failure: %s (%d)", 2043 dc_status_to_str(result), result); 2044 return false; /* result holds a nonzero error code, not a bool */ 2045 } 2046 2047 result = dc_commit_state_no_check(dc, context); 2048 2049 return (result == DC_OK); 2050 } 2051 2052 bool dc_acquire_release_mpc_3dlut( 2053 struct dc *dc, bool acquire, 2054 struct dc_stream_state *stream, 2055 struct dc_3dlut **lut, 2056 struct dc_transfer_func **shaper) 2057 { 2058 int pipe_idx; 2059 bool ret = false; 2060 bool found_pipe_idx = false; 2061 const struct resource_pool *pool = dc->res_pool; 2062 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2063 int mpcc_id = 0; 2064 2065 if (pool && res_ctx) { 2066 if (acquire) { 2067 /* find pipe idx for the given stream */ 2068 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2069 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2070 found_pipe_idx = true; 2071 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2072 break; 2073 } 2074 } 2075 } else 2076 found_pipe_idx = true; /* for release, pipe_idx is not required */ 2077 2078 if (found_pipe_idx) { 2079 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2080 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2081 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2082 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2083 } 2084 } 2085 return ret; 2086 } 2087 2088 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2089 { 2090 int i; 2091 struct pipe_ctx *pipe; 2092 2093 for (i = 0; i < MAX_PIPES; i++) { 2094 pipe = &context->res_ctx.pipe_ctx[i]; 2095 2096 // Don't check flip pending on phantom pipes 2097 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2098 continue; 2099 2100 /* Must set to false to start with, due to OR in update function */ 2101 pipe->plane_state->status.is_flip_pending = false; 2102 dc->hwss.update_pending_status(pipe); 2103 if (pipe->plane_state->status.is_flip_pending) 2104 return true; 2105 } 2106 return false; 2107 } 2108 2109 /* Perform updates here which need to be deferred until the next VUPDATE 2110 * 2111 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered, 2112 * but forcing lut memory to the shutdown state is immediate. This causes 2113 * single frame corruption as the lut gets disabled mid-frame unless shutdown 2114 * is deferred until after entering bypass.
*/ 2116 static void process_deferred_updates(struct dc *dc) 2117 { 2118 int i = 0; 2119 2120 if (dc->debug.enable_mem_low_power.bits.cm) { 2121 ASSERT(dc->dcn_ip->max_num_dpp); 2122 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2123 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2124 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2125 } 2126 } 2127 2128 void dc_post_update_surfaces_to_stream(struct dc *dc) 2129 { 2130 int i; 2131 struct dc_state *context = dc->current_state; 2132 2133 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2134 return; 2135 2136 post_surface_trace(dc); 2137 2138 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 2139 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2140 else 2141 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2142 2143 if (is_flip_pending_in_pipes(dc, context)) 2144 return; 2145 2146 for (i = 0; i < dc->res_pool->pipe_count; i++) 2147 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2148 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2149 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2150 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2151 } 2152 2153 process_deferred_updates(dc); 2154 2155 dc->hwss.optimize_bandwidth(dc, context); 2156 2157 if (dc->debug.enable_double_buffered_dsc_pg_support) 2158 dc->hwss.update_dsc_pg(dc, context, true); 2159 2160 dc->optimized_required = false; 2161 dc->wm_optimized_required = false; 2162 } 2163 2164 static void init_state(struct dc *dc, struct dc_state *context) 2165 { 2166 /* Each context must have its own instance of VBA, and in order to 2167 * initialize and obtain IP and SOC values the base DML instance from DC is 2168 * initially copied into every context 2169 */ 2170 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2171 } 2172 2173 struct dc_state *dc_create_state(struct dc *dc) 2174 { 2175 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2176 GFP_KERNEL); 2177 2178 if (!context) 2179 return NULL; 2180 2181 init_state(dc, context); 2182 2183 kref_init(&context->refcount); 2184 2185 return context; 2186 } 2187 2188 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2189 { 2190 int i, j; 2191 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2192 2193 if (!new_ctx) 2194 return NULL; 2195 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2196 2197 for (i = 0; i < MAX_PIPES; i++) { 2198 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2199 2200 if (cur_pipe->top_pipe) 2201 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2202 2203 if (cur_pipe->bottom_pipe) 2204 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2205 2206 if (cur_pipe->prev_odm_pipe) 2207 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2208 2209 if (cur_pipe->next_odm_pipe) 2210 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2211 2212 } 2213 2214 for (i = 0; i < new_ctx->stream_count; i++) { 2215 dc_stream_retain(new_ctx->streams[i]); 2216 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2217 dc_plane_state_retain( 2218 new_ctx->stream_status[i].plane_states[j]); 2219 } 2220 2221 kref_init(&new_ctx->refcount); 2222 2223 return new_ctx; 2224 } 2225 2226 void dc_retain_state(struct dc_state *context) 2227 { 2228 kref_get(&context->refcount); 2229 } 2230 2231 static void dc_state_free(struct kref *kref) 2232 { 2233 struct dc_state
*context = container_of(kref, struct dc_state, refcount); 2234 dc_resource_state_destruct(context); 2235 kvfree(context); 2236 } 2237 2238 void dc_release_state(struct dc_state *context) 2239 { 2240 kref_put(&context->refcount, dc_state_free); 2241 } 2242 2243 bool dc_set_generic_gpio_for_stereo(bool enable, 2244 struct gpio_service *gpio_service) 2245 { 2246 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2247 struct gpio_pin_info pin_info; 2248 struct gpio *generic; 2249 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2250 GFP_KERNEL); 2251 2252 if (!config) 2253 return false; 2254 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2255 2256 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2257 kfree(config); 2258 return false; 2259 } else { 2260 generic = dal_gpio_service_create_generic_mux( 2261 gpio_service, 2262 pin_info.offset, 2263 pin_info.mask); 2264 } 2265 2266 if (!generic) { 2267 kfree(config); 2268 return false; 2269 } 2270 2271 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2272 2273 config->enable_output_from_mux = enable; 2274 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2275 2276 if (gpio_result == GPIO_RESULT_OK) 2277 gpio_result = dal_mux_setup_config(generic, config); 2278 2279 if (gpio_result == GPIO_RESULT_OK) { 2280 dal_gpio_close(generic); 2281 dal_gpio_destroy_generic_mux(&generic); 2282 kfree(config); 2283 return true; 2284 } else { 2285 dal_gpio_close(generic); 2286 dal_gpio_destroy_generic_mux(&generic); 2287 kfree(config); 2288 return false; 2289 } 2290 } 2291 2292 static bool is_surface_in_context( 2293 const struct dc_state *context, 2294 const struct dc_plane_state *plane_state) 2295 { 2296 int j; 2297 2298 for (j = 0; j < MAX_PIPES; j++) { 2299 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2300 2301 if (plane_state == pipe_ctx->plane_state) { 2302 return true; 2303 } 2304 } 2305 2306 return false; 2307 } 2308 2309 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2310 { 2311 union surface_update_flags *update_flags = &u->surface->update_flags; 2312 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2313 2314 if (!u->plane_info) 2315 return UPDATE_TYPE_FAST; 2316 2317 if (u->plane_info->color_space != u->surface->color_space) { 2318 update_flags->bits.color_space_change = 1; 2319 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2320 } 2321 2322 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2323 update_flags->bits.horizontal_mirror_change = 1; 2324 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2325 } 2326 2327 if (u->plane_info->rotation != u->surface->rotation) { 2328 update_flags->bits.rotation_change = 1; 2329 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2330 } 2331 2332 if (u->plane_info->format != u->surface->format) { 2333 update_flags->bits.pixel_format_change = 1; 2334 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2335 } 2336 2337 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2338 update_flags->bits.stereo_format_change = 1; 2339 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2340 } 2341 2342 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2343 update_flags->bits.per_pixel_alpha_change = 1; 2344 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2345 } 2346 2347 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2348 
update_flags->bits.global_alpha_change = 1; 2349 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2350 } 2351 2352 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2353 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2354 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2355 /* During DCC on/off, the stutter period is calculated before 2356 * DCC has fully transitioned. This results in an incorrect 2357 * stutter period calculation. Triggering a full update will 2358 * recalculate the stutter period. 2359 */ 2360 update_flags->bits.dcc_change = 1; 2361 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2362 } 2363 2364 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2365 resource_pixel_format_to_bpp(u->surface->format)) { 2366 /* different bytes per element will require full bandwidth 2367 * and DML calculations 2368 */ 2369 update_flags->bits.bpp_change = 1; 2370 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2371 } 2372 2373 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2374 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2375 update_flags->bits.plane_size_change = 1; 2376 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2377 } 2378 2379 2380 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2381 sizeof(union dc_tiling_info)) != 0) { 2382 update_flags->bits.swizzle_change = 1; 2383 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2384 2385 /* TODO: the checks below are HW dependent; we should add a hook to the 2386 * DCE/N resource and validate there. 2387 */ 2388 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2389 /* swizzled mode requires RQ to be set up properly, 2390 * thus we need to run DML to calculate the RQ settings 2391 */ 2392 update_flags->bits.bandwidth_change = 1; 2393 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2394 } 2395 } 2396 2397 /* This should be UPDATE_TYPE_FAST if nothing has changed.
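 * (update_type starts at UPDATE_TYPE_FAST and is only ever raised by the
 * elevate_update_type() calls above, so it falls through unchanged here.)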
*/ 2398 return update_type; 2399 } 2400 2401 static enum surface_update_type get_scaling_info_update_type( 2402 const struct dc_surface_update *u) 2403 { 2404 union surface_update_flags *update_flags = &u->surface->update_flags; 2405 2406 if (!u->scaling_info) 2407 return UPDATE_TYPE_FAST; 2408 2409 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2410 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2411 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2412 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2413 || u->scaling_info->scaling_quality.integer_scaling != 2414 u->surface->scaling_quality.integer_scaling 2415 ) { 2416 update_flags->bits.scaling_change = 1; 2417 2418 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2419 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2420 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2421 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2422 /* Making dst rect smaller requires a bandwidth change */ 2423 update_flags->bits.bandwidth_change = 1; 2424 } 2425 2426 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2427 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2428 2429 update_flags->bits.scaling_change = 1; 2430 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2431 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2432 /* Making src rect bigger requires a bandwidth change */ 2433 update_flags->bits.clock_change = 1; 2434 } 2435 2436 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2437 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2438 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2439 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2440 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2441 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2442 update_flags->bits.position_change = 1; 2443 2444 if (update_flags->bits.clock_change 2445 || update_flags->bits.bandwidth_change 2446 || update_flags->bits.scaling_change) 2447 return UPDATE_TYPE_FULL; 2448 2449 if (update_flags->bits.position_change) 2450 return UPDATE_TYPE_MED; 2451 2452 return UPDATE_TYPE_FAST; 2453 } 2454 2455 static enum surface_update_type det_surface_update(const struct dc *dc, 2456 const struct dc_surface_update *u) 2457 { 2458 const struct dc_state *context = dc->current_state; 2459 enum surface_update_type type; 2460 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2461 union surface_update_flags *update_flags = &u->surface->update_flags; 2462 2463 if (u->flip_addr) 2464 update_flags->bits.addr_update = 1; 2465 2466 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2467 update_flags->raw = 0xFFFFFFFF; 2468 return UPDATE_TYPE_FULL; 2469 } 2470 2471 update_flags->raw = 0; // Reset all flags 2472 2473 type = get_plane_info_update_type(u); 2474 elevate_update_type(&overall_type, type); 2475 2476 type = get_scaling_info_update_type(u); 2477 elevate_update_type(&overall_type, type); 2478 2479 if (u->flip_addr) { 2480 update_flags->bits.addr_update = 1; 2481 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2482 update_flags->bits.tmz_changed = 1; 2483 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2484 } 2485 } 2486 if (u->in_transfer_func) 2487 update_flags->bits.in_transfer_func_change = 1; 2488 2489 if (u->input_csc_color_matrix) 2490 
update_flags->bits.input_csc_change = 1; 2491 2492 if (u->coeff_reduction_factor) 2493 update_flags->bits.coeff_reduction_change = 1; 2494 2495 if (u->gamut_remap_matrix) 2496 update_flags->bits.gamut_remap_change = 1; 2497 2498 if (u->gamma) { 2499 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2500 2501 if (u->plane_info) 2502 format = u->plane_info->format; 2503 else if (u->surface) 2504 format = u->surface->format; 2505 2506 if (dce_use_lut(format)) 2507 update_flags->bits.gamma_change = 1; 2508 } 2509 2510 if (u->lut3d_func || u->func_shaper) 2511 update_flags->bits.lut_3d = 1; 2512 2513 if (u->hdr_mult.value) 2514 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2515 update_flags->bits.hdr_mult = 1; 2516 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2517 } 2518 2519 if (update_flags->bits.in_transfer_func_change) { 2520 type = UPDATE_TYPE_MED; 2521 elevate_update_type(&overall_type, type); 2522 } 2523 2524 if (update_flags->bits.input_csc_change 2525 || update_flags->bits.coeff_reduction_change 2526 || update_flags->bits.lut_3d 2527 || update_flags->bits.gamma_change 2528 || update_flags->bits.gamut_remap_change) { 2529 type = UPDATE_TYPE_FULL; 2530 elevate_update_type(&overall_type, type); 2531 } 2532 2533 return overall_type; 2534 } 2535 2536 static enum surface_update_type check_update_surfaces_for_stream( 2537 struct dc *dc, 2538 struct dc_surface_update *updates, 2539 int surface_count, 2540 struct dc_stream_update *stream_update, 2541 const struct dc_stream_status *stream_status) 2542 { 2543 int i; 2544 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2545 2546 if (dc->idle_optimizations_allowed) 2547 overall_type = UPDATE_TYPE_FULL; 2548 2549 if (stream_status == NULL || stream_status->plane_count != surface_count) 2550 overall_type = UPDATE_TYPE_FULL; 2551 2552 if (stream_update && stream_update->pending_test_pattern) { 2553 overall_type = UPDATE_TYPE_FULL; 2554 } 2555 2556 /* some stream updates require passive update */ 2557 if (stream_update) { 2558 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2559 2560 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2561 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2562 stream_update->integer_scaling_update) 2563 su_flags->bits.scaling = 1; 2564 2565 if (stream_update->out_transfer_func) 2566 su_flags->bits.out_tf = 1; 2567 2568 if (stream_update->abm_level) 2569 su_flags->bits.abm_level = 1; 2570 2571 if (stream_update->dpms_off) 2572 su_flags->bits.dpms_off = 1; 2573 2574 if (stream_update->gamut_remap) 2575 su_flags->bits.gamut_remap = 1; 2576 2577 if (stream_update->wb_update) 2578 su_flags->bits.wb_update = 1; 2579 2580 if (stream_update->dsc_config) 2581 su_flags->bits.dsc_changed = 1; 2582 2583 if (stream_update->mst_bw_update) 2584 su_flags->bits.mst_bw = 1; 2585 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) 2586 su_flags->bits.crtc_timing_adjust = 1; 2587 2588 if (su_flags->raw != 0) 2589 overall_type = UPDATE_TYPE_FULL; 2590 2591 if (stream_update->output_csc_transform || stream_update->output_color_space) 2592 su_flags->bits.out_csc = 1; 2593 } 2594 2595 for (i = 0 ; i < surface_count; i++) { 2596 enum surface_update_type type = 2597 det_surface_update(dc, &updates[i]); 2598 2599 elevate_update_type(&overall_type, type); 2600 } 2601 2602 return overall_type; 2603 } 2604 2605 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) 2606 { 2607 int 
view_height, view_width, clip_x, clip_y, clip_width, clip_height; 2608 2609 view_height = src.height; 2610 view_width = src.width; 2611 2612 clip_x = clip_rect.x; 2613 clip_y = clip_rect.y; 2614 2615 clip_width = clip_rect.width; 2616 clip_height = clip_rect.height; 2617 2618 /* check for centered video accounting for off by 1 scaling truncation */ 2619 if ((view_height - clip_y - clip_height <= clip_y + 1) && 2620 (view_width - clip_x - clip_width <= clip_x + 1) && 2621 (view_height - clip_y - clip_height >= clip_y - 1) && 2622 (view_width - clip_x - clip_width >= clip_x - 1)) { 2623 2624 /* when OS scales up/down to letter box, it may end up 2625 * with few blank pixels on the border due to truncating. 2626 * Add offset margin to account for this 2627 */ 2628 if (clip_x <= 4 || clip_y <= 4) 2629 return true; 2630 } 2631 2632 return false; 2633 } 2634 2635 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, 2636 struct dc_surface_update *srf_updates, int surface_count, 2637 enum surface_update_type update_type) 2638 { 2639 enum surface_update_type new_update_type = update_type; 2640 int i, j; 2641 struct pipe_ctx *pipe = NULL; 2642 struct dc_stream_state *stream; 2643 2644 /* Check that we are in windowed MPO with ODM 2645 * - look for MPO pipe by scanning pipes for first pipe matching 2646 * surface that has moved ( position change ) 2647 * - MPO pipe will have top pipe 2648 * - check that top pipe has ODM pointer 2649 */ 2650 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { 2651 for (i = 0; i < surface_count; i++) { 2652 if (srf_updates[i].surface && srf_updates[i].scaling_info 2653 && srf_updates[i].surface->update_flags.bits.position_change) { 2654 2655 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2656 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { 2657 pipe = &dc->current_state->res_ctx.pipe_ctx[j]; 2658 stream = pipe->stream; 2659 break; 2660 } 2661 } 2662 2663 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream 2664 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { 2665 struct rect old_clip_rect, new_clip_rect; 2666 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; 2667 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; 2668 2669 old_clip_rect = srf_updates[i].surface->clip_rect; 2670 new_clip_rect = srf_updates[i].scaling_info->clip_rect; 2671 2672 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2673 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2674 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; 2675 2676 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2677 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2678 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; 2679 2680 if (old_clip_rect_left && new_clip_rect_middle) 2681 new_update_type = UPDATE_TYPE_FULL; 2682 else if (old_clip_rect_middle && new_clip_rect_right) 2683 new_update_type = UPDATE_TYPE_FULL; 2684 else if (old_clip_rect_right && new_clip_rect_middle) 2685 new_update_type = UPDATE_TYPE_FULL; 2686 else if (old_clip_rect_middle && new_clip_rect_left) 2687 new_update_type = UPDATE_TYPE_FULL; 2688 } 2689 } 2690 } 2691 } 2692 return new_update_type; 2693 } 2694 2695 /* 2696 * 
dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2697 * 2698 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2699 */ 2700 enum surface_update_type dc_check_update_surfaces_for_stream( 2701 struct dc *dc, 2702 struct dc_surface_update *updates, 2703 int surface_count, 2704 struct dc_stream_update *stream_update, 2705 const struct dc_stream_status *stream_status) 2706 { 2707 int i; 2708 enum surface_update_type type; 2709 2710 if (stream_update) 2711 stream_update->stream->update_flags.raw = 0; 2712 for (i = 0; i < surface_count; i++) 2713 updates[i].surface->update_flags.raw = 0; 2714 2715 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2716 if (type == UPDATE_TYPE_FULL) { 2717 if (stream_update) { 2718 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2719 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2720 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2721 } 2722 for (i = 0; i < surface_count; i++) 2723 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2724 } 2725 2726 if (type == UPDATE_TYPE_MED) 2727 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, 2728 updates, surface_count, type); 2729 2730 if (type == UPDATE_TYPE_FAST) { 2731 // If there's an available clock comparator, we use that. 2732 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2733 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2734 dc->optimized_required = true; 2735 // Else we fallback to mem compare. 2736 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2737 dc->optimized_required = true; 2738 } 2739 2740 dc->optimized_required |= dc->wm_optimized_required; 2741 } 2742 2743 return type; 2744 } 2745 2746 static struct dc_stream_status *stream_get_status( 2747 struct dc_state *ctx, 2748 struct dc_stream_state *stream) 2749 { 2750 uint8_t i; 2751 2752 for (i = 0; i < ctx->stream_count; i++) { 2753 if (stream == ctx->streams[i]) { 2754 return &ctx->stream_status[i]; 2755 } 2756 } 2757 2758 return NULL; 2759 } 2760 2761 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2762 2763 static void copy_surface_update_to_plane( 2764 struct dc_plane_state *surface, 2765 struct dc_surface_update *srf_update) 2766 { 2767 if (srf_update->flip_addr) { 2768 surface->address = srf_update->flip_addr->address; 2769 surface->flip_immediate = 2770 srf_update->flip_addr->flip_immediate; 2771 surface->time.time_elapsed_in_us[surface->time.index] = 2772 srf_update->flip_addr->flip_timestamp_in_us - 2773 surface->time.prev_update_time_in_us; 2774 surface->time.prev_update_time_in_us = 2775 srf_update->flip_addr->flip_timestamp_in_us; 2776 surface->time.index++; 2777 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2778 surface->time.index = 0; 2779 2780 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2781 } 2782 2783 if (srf_update->scaling_info) { 2784 surface->scaling_quality = 2785 srf_update->scaling_info->scaling_quality; 2786 surface->dst_rect = 2787 srf_update->scaling_info->dst_rect; 2788 surface->src_rect = 2789 srf_update->scaling_info->src_rect; 2790 surface->clip_rect = 2791 srf_update->scaling_info->clip_rect; 2792 } 2793 2794 if (srf_update->plane_info) { 2795 surface->color_space = 2796 
srf_update->plane_info->color_space; 2797 surface->format = 2798 srf_update->plane_info->format; 2799 surface->plane_size = 2800 srf_update->plane_info->plane_size; 2801 surface->rotation = 2802 srf_update->plane_info->rotation; 2803 surface->horizontal_mirror = 2804 srf_update->plane_info->horizontal_mirror; 2805 surface->stereo_format = 2806 srf_update->plane_info->stereo_format; 2807 surface->tiling_info = 2808 srf_update->plane_info->tiling_info; 2809 surface->visible = 2810 srf_update->plane_info->visible; 2811 surface->per_pixel_alpha = 2812 srf_update->plane_info->per_pixel_alpha; 2813 surface->global_alpha = 2814 srf_update->plane_info->global_alpha; 2815 surface->global_alpha_value = 2816 srf_update->plane_info->global_alpha_value; 2817 surface->dcc = 2818 srf_update->plane_info->dcc; 2819 surface->layer_index = 2820 srf_update->plane_info->layer_index; 2821 } 2822 2823 if (srf_update->gamma && 2824 (surface->gamma_correction != 2825 srf_update->gamma)) { 2826 memcpy(&surface->gamma_correction->entries, 2827 &srf_update->gamma->entries, 2828 sizeof(struct dc_gamma_entries)); 2829 surface->gamma_correction->is_identity = 2830 srf_update->gamma->is_identity; 2831 surface->gamma_correction->num_entries = 2832 srf_update->gamma->num_entries; 2833 surface->gamma_correction->type = 2834 srf_update->gamma->type; 2835 } 2836 2837 if (srf_update->in_transfer_func && 2838 (surface->in_transfer_func != 2839 srf_update->in_transfer_func)) { 2840 surface->in_transfer_func->sdr_ref_white_level = 2841 srf_update->in_transfer_func->sdr_ref_white_level; 2842 surface->in_transfer_func->tf = 2843 srf_update->in_transfer_func->tf; 2844 surface->in_transfer_func->type = 2845 srf_update->in_transfer_func->type; 2846 memcpy(&surface->in_transfer_func->tf_pts, 2847 &srf_update->in_transfer_func->tf_pts, 2848 sizeof(struct dc_transfer_func_distributed_points)); 2849 } 2850 2851 if (srf_update->func_shaper && 2852 (surface->in_shaper_func != 2853 srf_update->func_shaper)) 2854 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2855 sizeof(*surface->in_shaper_func)); 2856 2857 if (srf_update->lut3d_func && 2858 (surface->lut3d_func != 2859 srf_update->lut3d_func)) 2860 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2861 sizeof(*surface->lut3d_func)); 2862 2863 if (srf_update->hdr_mult.value) 2864 surface->hdr_mult = 2865 srf_update->hdr_mult; 2866 2867 if (srf_update->blend_tf && 2868 (surface->blend_tf != 2869 srf_update->blend_tf)) 2870 memcpy(surface->blend_tf, srf_update->blend_tf, 2871 sizeof(*surface->blend_tf)); 2872 2873 if (srf_update->input_csc_color_matrix) 2874 surface->input_csc_color_matrix = 2875 *srf_update->input_csc_color_matrix; 2876 2877 if (srf_update->coeff_reduction_factor) 2878 surface->coeff_reduction_factor = 2879 *srf_update->coeff_reduction_factor; 2880 2881 if (srf_update->gamut_remap_matrix) 2882 surface->gamut_remap_matrix = 2883 *srf_update->gamut_remap_matrix; 2884 } 2885 2886 static void copy_stream_update_to_stream(struct dc *dc, 2887 struct dc_state *context, 2888 struct dc_stream_state *stream, 2889 struct dc_stream_update *update) 2890 { 2891 struct dc_context *dc_ctx = dc->ctx; 2892 2893 if (update == NULL || stream == NULL) 2894 return; 2895 2896 if (update->src.height && update->src.width) 2897 stream->src = update->src; 2898 2899 if (update->dst.height && update->dst.width) 2900 stream->dst = update->dst; 2901 2902 if (update->out_transfer_func && 2903 stream->out_transfer_func != update->out_transfer_func) { 2904 
stream->out_transfer_func->sdr_ref_white_level = 2905 update->out_transfer_func->sdr_ref_white_level; 2906 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2907 stream->out_transfer_func->type = 2908 update->out_transfer_func->type; 2909 memcpy(&stream->out_transfer_func->tf_pts, 2910 &update->out_transfer_func->tf_pts, 2911 sizeof(struct dc_transfer_func_distributed_points)); 2912 } 2913 2914 if (update->hdr_static_metadata) 2915 stream->hdr_static_metadata = *update->hdr_static_metadata; 2916 2917 if (update->abm_level) 2918 stream->abm_level = *update->abm_level; 2919 2920 if (update->periodic_interrupt) 2921 stream->periodic_interrupt = *update->periodic_interrupt; 2922 2923 if (update->gamut_remap) 2924 stream->gamut_remap_matrix = *update->gamut_remap; 2925 2926 /* Note: this being updated after mode set is currently not a use case; 2927 * however, if it arises, OCSC would need to be reprogrammed at a 2928 * minimum 2929 */ 2930 if (update->output_color_space) 2931 stream->output_color_space = *update->output_color_space; 2932 2933 if (update->output_csc_transform) 2934 stream->csc_color_matrix = *update->output_csc_transform; 2935 2936 if (update->vrr_infopacket) 2937 stream->vrr_infopacket = *update->vrr_infopacket; 2938 2939 if (update->allow_freesync) 2940 stream->allow_freesync = *update->allow_freesync; 2941 2942 if (update->vrr_active_variable) 2943 stream->vrr_active_variable = *update->vrr_active_variable; 2944 2945 if (update->crtc_timing_adjust) 2946 stream->adjust = *update->crtc_timing_adjust; 2947 2948 if (update->dpms_off) 2949 stream->dpms_off = *update->dpms_off; 2950 2951 if (update->hfvsif_infopacket) 2952 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2953 2954 if (update->vtem_infopacket) 2955 stream->vtem_infopacket = *update->vtem_infopacket; 2956 2957 if (update->vsc_infopacket) 2958 stream->vsc_infopacket = *update->vsc_infopacket; 2959 2960 if (update->vsp_infopacket) 2961 stream->vsp_infopacket = *update->vsp_infopacket; 2962 2963 if (update->dither_option) 2964 stream->dither_option = *update->dither_option; 2965 2966 if (update->pending_test_pattern) 2967 stream->test_pattern = *update->pending_test_pattern; 2968 /* update current stream with writeback info */ 2969 if (update->wb_update) { 2970 int i; 2971 2972 stream->num_wb_info = update->wb_update->num_wb_info; 2973 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2974 for (i = 0; i < stream->num_wb_info; i++) 2975 stream->writeback_info[i] = 2976 update->wb_update->writeback_info[i]; 2977 } 2978 if (update->dsc_config) { 2979 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2980 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2981 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2982 update->dsc_config->num_slices_v != 0); 2983 2984 /* Use a temporary context for validating the new DSC config */ 2985 struct dc_state *dsc_validate_context = dc_create_state(dc); 2986 2987 if (dsc_validate_context) { 2988 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2989 2990 stream->timing.dsc_cfg = *update->dsc_config; 2991 stream->timing.flags.DSC = enable_dsc; 2992 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2993 stream->timing.dsc_cfg = old_dsc_cfg; 2994 stream->timing.flags.DSC = old_dsc_enabled; 2995 update->dsc_config = NULL; 2996 } 2997 2998 dc_release_state(dsc_validate_context); 2999 } else { 3000 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 3001 update->dsc_config = NULL; 3002
} 3003 } 3004 } 3005 3006 static bool update_planes_and_stream_state(struct dc *dc, 3007 struct dc_surface_update *srf_updates, int surface_count, 3008 struct dc_stream_state *stream, 3009 struct dc_stream_update *stream_update, 3010 enum surface_update_type *new_update_type, 3011 struct dc_state **new_context) 3012 { 3013 struct dc_state *context; 3014 int i, j; 3015 enum surface_update_type update_type; 3016 const struct dc_stream_status *stream_status; 3017 struct dc_context *dc_ctx = dc->ctx; 3018 3019 stream_status = dc_stream_get_status(stream); 3020 3021 if (!stream_status) { 3022 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3023 ASSERT(false); 3024 3025 return false; /* Cannot commit surface to stream that is not committed */ 3026 } 3027 3028 context = dc->current_state; 3029 3030 update_type = dc_check_update_surfaces_for_stream( 3031 dc, srf_updates, surface_count, stream_update, stream_status); 3032 3033 /* update current stream with the new updates */ 3034 copy_stream_update_to_stream(dc, context, stream, stream_update); 3035 3036 /* do not perform surface update if surface has invalid dimensions 3037 * (all zero) and no scaling_info is provided 3038 */ 3039 if (surface_count > 0) { 3040 for (i = 0; i < surface_count; i++) { 3041 if ((srf_updates[i].surface->src_rect.width == 0 || 3042 srf_updates[i].surface->src_rect.height == 0 || 3043 srf_updates[i].surface->dst_rect.width == 0 || 3044 srf_updates[i].surface->dst_rect.height == 0) && 3045 (!srf_updates[i].scaling_info || 3046 srf_updates[i].scaling_info->src_rect.width == 0 || 3047 srf_updates[i].scaling_info->src_rect.height == 0 || 3048 srf_updates[i].scaling_info->dst_rect.width == 0 || 3049 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3050 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3051 return false; 3052 } 3053 } 3054 } 3055 3056 if (update_type >= update_surface_trace_level) 3057 update_surface_trace(dc, srf_updates, surface_count); 3058 3059 if (update_type >= UPDATE_TYPE_FULL) { 3060 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3061 3062 for (i = 0; i < surface_count; i++) 3063 new_planes[i] = srf_updates[i].surface; 3064 3065 /* initialize scratch memory for building context */ 3066 context = dc_create_state(dc); 3067 if (context == NULL) { 3068 DC_ERROR("Failed to allocate new validate context!\n"); 3069 return false; 3070 } 3071 3072 dc_resource_state_copy_construct( 3073 dc->current_state, context); 3074 3075 /* For each full update, remove all existing phantom pipes first. 
3076 * Ensures that we have enough pipes for newly added MPO planes 3077 */ 3078 if (dc->res_pool->funcs->remove_phantom_pipes) 3079 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3080 3081 /*remove old surfaces from context */ 3082 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3083 3084 BREAK_TO_DEBUGGER(); 3085 goto fail; 3086 } 3087 3088 /* add surface to context */ 3089 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3090 3091 BREAK_TO_DEBUGGER(); 3092 goto fail; 3093 } 3094 } 3095 3096 /* save update parameters into surface */ 3097 for (i = 0; i < surface_count; i++) { 3098 struct dc_plane_state *surface = srf_updates[i].surface; 3099 3100 copy_surface_update_to_plane(surface, &srf_updates[i]); 3101 3102 if (update_type >= UPDATE_TYPE_MED) { 3103 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3104 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3105 3106 if (pipe_ctx->plane_state != surface) 3107 continue; 3108 3109 resource_build_scaling_params(pipe_ctx); 3110 } 3111 } 3112 } 3113 3114 if (update_type == UPDATE_TYPE_FULL) { 3115 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3116 /* For phantom pipes we remove and create a new set of phantom pipes 3117 * for each full update (because we don't know if we'll need phantom 3118 * pipes until after the first round of validation). However, if validation 3119 * fails we need to keep the existing phantom pipes (because we don't update 3120 * the dc->current_state). 3121 * 3122 * The phantom stream/plane refcount is decremented for validation because 3123 * we assume it'll be removed (the free comes when the dc_state is freed), 3124 * but if validation fails we have to increment back the refcount so it's 3125 * consistent. 
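 * The retain_phantom_pipes() call below restores those references on the
 * validation failure path.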
3126 */ 3127 if (dc->res_pool->funcs->retain_phantom_pipes) 3128 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); 3129 BREAK_TO_DEBUGGER(); 3130 goto fail; 3131 } 3132 } 3133 3134 *new_context = context; 3135 *new_update_type = update_type; 3136 3137 return true; 3138 3139 fail: 3140 dc_release_state(context); 3141 3142 return false; 3143 3144 } 3145 3146 static void commit_planes_do_stream_update(struct dc *dc, 3147 struct dc_stream_state *stream, 3148 struct dc_stream_update *stream_update, 3149 enum surface_update_type update_type, 3150 struct dc_state *context) 3151 { 3152 int j; 3153 3154 // Stream updates 3155 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3156 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3157 3158 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 3159 3160 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3161 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3162 3163 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3164 stream_update->vrr_infopacket || 3165 stream_update->vsc_infopacket || 3166 stream_update->vsp_infopacket || 3167 stream_update->hfvsif_infopacket || 3168 stream_update->vtem_infopacket) { 3169 resource_build_info_frame(pipe_ctx); 3170 dc->hwss.update_info_frame(pipe_ctx); 3171 3172 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3173 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3174 } 3175 3176 if (stream_update->hdr_static_metadata && 3177 stream->use_dynamic_meta && 3178 dc->hwss.set_dmdata_attributes && 3179 pipe_ctx->stream->dmdata_address.quad_part != 0) 3180 dc->hwss.set_dmdata_attributes(pipe_ctx); 3181 3182 if (stream_update->gamut_remap) 3183 dc_stream_set_gamut_remap(dc, stream); 3184 3185 if (stream_update->output_csc_transform) 3186 dc_stream_program_csc_matrix(dc, stream); 3187 3188 if (stream_update->dither_option) { 3189 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3190 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3191 &pipe_ctx->stream->bit_depth_params); 3192 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3193 &stream->bit_depth_params, 3194 &stream->clamping); 3195 while (odm_pipe) { 3196 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3197 &stream->bit_depth_params, 3198 &stream->clamping); 3199 odm_pipe = odm_pipe->next_odm_pipe; 3200 } 3201 } 3202 3203 3204 /* Full fe update*/ 3205 if (update_type == UPDATE_TYPE_FAST) 3206 continue; 3207 3208 if (stream_update->dsc_config) 3209 dp_update_dsc_config(pipe_ctx); 3210 3211 if (stream_update->mst_bw_update) { 3212 if (stream_update->mst_bw_update->is_increase) 3213 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3214 else 3215 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 3216 } 3217 3218 if (stream_update->pending_test_pattern) { 3219 dc_link_dp_set_test_pattern(stream->link, 3220 stream->test_pattern.type, 3221 stream->test_pattern.color_space, 3222 stream->test_pattern.p_link_settings, 3223 stream->test_pattern.p_custom_pattern, 3224 stream->test_pattern.cust_pattern_size); 3225 } 3226 3227 if (stream_update->dpms_off) { 3228 if (*stream_update->dpms_off) { 3229 core_link_disable_stream(pipe_ctx); 3230 /* for dpms, keep acquired resources*/ 3231 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3232 
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3233 3234 dc->optimized_required = true; 3235 3236 } else { 3237 if (get_seamless_boot_stream_count(context) == 0) 3238 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3239 core_link_enable_stream(dc->current_state, pipe_ctx); 3240 } 3241 } 3242 3243 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3244 bool should_program_abm = true; 3245 3246 // if otg funcs defined check if blanked before programming 3247 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3248 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3249 should_program_abm = false; 3250 3251 if (should_program_abm) { 3252 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3253 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3254 } else { 3255 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3256 pipe_ctx->stream_res.abm, stream->abm_level); 3257 } 3258 } 3259 } 3260 } 3261 } 3262 } 3263 3264 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3265 { 3266 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3267 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3268 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3269 return true; 3270 3271 return false; 3272 } 3273 3274 void dc_dmub_update_dirty_rect(struct dc *dc, 3275 int surface_count, 3276 struct dc_stream_state *stream, 3277 struct dc_surface_update *srf_updates, 3278 struct dc_state *context) 3279 { 3280 union dmub_rb_cmd cmd; 3281 struct dc_context *dc_ctx = dc->ctx; 3282 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3283 unsigned int i, j; 3284 unsigned int panel_inst = 0; 3285 3286 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3287 return; 3288 3289 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3290 return; 3291 3292 memset(&cmd, 0x0, sizeof(cmd)); 3293 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3294 cmd.update_dirty_rect.header.sub_type = 0; 3295 cmd.update_dirty_rect.header.payload_bytes = 3296 sizeof(cmd.update_dirty_rect) - 3297 sizeof(cmd.update_dirty_rect.header); 3298 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3299 for (i = 0; i < surface_count; i++) { 3300 struct dc_plane_state *plane_state = srf_updates[i].surface; 3301 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3302 3303 if (!srf_updates[i].surface || !flip_addr) 3304 continue; 3305 /* Do not send in immediate flip mode */ 3306 if (srf_updates[i].surface->flip_immediate) 3307 continue; 3308 3309 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3310 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3311 sizeof(flip_addr->dirty_rects)); 3312 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3313 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3314 3315 if (pipe_ctx->stream != stream) 3316 continue; 3317 if (pipe_ctx->plane_state != plane_state) 3318 continue; 3319 3320 update_dirty_rect->panel_inst = panel_inst; 3321 update_dirty_rect->pipe_idx = j; 3322 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); 3323 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); 3324 } 3325 } 3326 } 3327 3328 static void commit_planes_for_stream(struct dc *dc, 3329 struct dc_surface_update *srf_updates, 3330 int surface_count, 3331 struct dc_stream_state *stream, 3332 struct dc_stream_update *stream_update, 3333 enum surface_update_type update_type, 3334 struct dc_state *context) 3335 { 3336 int i, j; 3337 
struct pipe_ctx *top_pipe_to_program = NULL; 3338 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3339 bool subvp_prev_use = false; 3340 bool subvp_curr_use = false; 3341 3342 // Once we apply the new subvp context to hardware it won't be in the 3343 // dc->current_state anymore, so we have to cache it before we apply 3344 // the new SubVP context 3345 subvp_prev_use = false; 3346 3347 3348 dc_z10_restore(dc); 3349 3350 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 3351 /* Optimize seamless boot flag keeps clocks and watermarks high until 3352 * first flip. After first flip, optimization is required to lower 3353 * bandwidth. Important to note that it is expected UEFI will 3354 * only light up a single display on POST, therefore we only expect 3355 * one stream with seamless boot flag set. 3356 */ 3357 if (stream->apply_seamless_boot_optimization) { 3358 stream->apply_seamless_boot_optimization = false; 3359 3360 if (get_seamless_boot_stream_count(context) == 0) 3361 dc->optimized_required = true; 3362 } 3363 } 3364 3365 if (update_type == UPDATE_TYPE_FULL) { 3366 dc_allow_idle_optimizations(dc, false); 3367 3368 if (get_seamless_boot_stream_count(context) == 0) 3369 dc->hwss.prepare_bandwidth(dc, context); 3370 3371 if (dc->debug.enable_double_buffered_dsc_pg_support) 3372 dc->hwss.update_dsc_pg(dc, context, false); 3373 3374 context_clock_trace(dc, context); 3375 } 3376 3377 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3378 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3379 3380 if (!pipe_ctx->top_pipe && 3381 !pipe_ctx->prev_odm_pipe && 3382 pipe_ctx->stream && 3383 pipe_ctx->stream == stream) { 3384 top_pipe_to_program = pipe_ctx; 3385 } 3386 } 3387 3388 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3389 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3390 3391 // Check old context for SubVP 3392 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3393 if (subvp_prev_use) 3394 break; 3395 } 3396 3397 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3398 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3399 3400 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3401 subvp_curr_use = true; 3402 break; 3403 } 3404 } 3405 3406 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3407 struct pipe_ctx *mpcc_pipe; 3408 struct pipe_ctx *odm_pipe; 3409 3410 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3411 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3412 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3413 } 3414 3415 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3416 if (top_pipe_to_program && 3417 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3418 if (should_use_dmub_lock(stream->link)) { 3419 union dmub_hw_lock_flags hw_locks = { 0 }; 3420 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3421 3422 hw_locks.bits.lock_dig = 1; 3423 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3424 3425 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3426 true, 3427 &hw_locks, 3428 &inst_flags); 3429 } else 3430 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3431 top_pipe_to_program->stream_res.tg); 3432 } 3433 3434 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3435 if (dc->hwss.subvp_pipe_control_lock) 3436 dc->hwss.subvp_pipe_control_lock(dc, context, true, 
should_lock_all_pipes, NULL, subvp_prev_use); 3437 dc->hwss.interdependent_update_lock(dc, context, true); 3438 3439 } else { 3440 if (dc->hwss.subvp_pipe_control_lock) 3441 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3442 /* Lock the top pipe while updating plane addrs, since freesync requires 3443 * plane addr update event triggers to be synchronized. 3444 * top_pipe_to_program is expected to never be NULL 3445 */ 3446 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3447 } 3448 3449 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3450 3451 if (update_type != UPDATE_TYPE_FAST) { 3452 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3453 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3454 3455 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3456 subvp_prev_use) { 3457 // If old context or new context has phantom pipes, apply 3458 // the phantom timings now. We can't change the phantom 3459 // pipe configuration safely without driver acquiring 3460 // the DMCUB lock first. 3461 dc->hwss.apply_ctx_to_hw(dc, context); 3462 break; 3463 } 3464 } 3465 } 3466 3467 // Stream updates 3468 if (stream_update) 3469 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3470 3471 if (surface_count == 0) { 3472 /* 3473 * In case of turning off the screen, there is no need to program the front end 3474 * a second time; just return after programming blank. 3475 */ 3476 if (dc->hwss.apply_ctx_for_surface) 3477 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3478 if (dc->hwss.program_front_end_for_ctx) 3479 dc->hwss.program_front_end_for_ctx(dc, context); 3480 3481 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3482 dc->hwss.interdependent_update_lock(dc, context, false); 3483 } else { 3484 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3485 } 3486 dc->hwss.post_unlock_program_front_end(dc, context); 3487 3488 if (update_type != UPDATE_TYPE_FAST) 3489 if (dc->hwss.commit_subvp_config) 3490 dc->hwss.commit_subvp_config(dc, context); 3491 3492 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3493 * move the SubVP lock to after the phantom pipes have been set up 3494 */ 3495 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3496 if (dc->hwss.subvp_pipe_control_lock) 3497 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3498 } else { 3499 if (dc->hwss.subvp_pipe_control_lock) 3500 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3501 } 3502 3503 return; 3504 } 3505 3506 if (update_type != UPDATE_TYPE_FAST) { 3507 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3508 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3509 3510 if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && 3511 pipe_ctx->stream && pipe_ctx->plane_state) { 3512 /* Only update visual confirm for SUBVP here. 3513 * The bar appears on all pipes, so we need to update the bar on all displays, 3514 * so the information doesn't get stale.
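 * The update_visual_confirm_color() call below refreshes the confirm color
 * for every pipe that has both a stream and a plane attached.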
3515 */ 3516 struct mpcc_blnd_cfg blnd_cfg = { 0 }; 3517 3518 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, 3519 pipe_ctx->plane_res.hubp->inst); 3520 } 3521 } 3522 } 3523 3524 if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 3525 for (i = 0; i < surface_count; i++) { 3526 struct dc_plane_state *plane_state = srf_updates[i].surface; 3527 /*set logical flag for lock/unlock use*/ 3528 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3529 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3530 if (!pipe_ctx->plane_state) 3531 continue; 3532 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3533 continue; 3534 pipe_ctx->plane_state->triplebuffer_flips = false; 3535 if (update_type == UPDATE_TYPE_FAST && 3536 dc->hwss.program_triplebuffer != NULL && 3537 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3538 /*triple buffer for VUpdate only*/ 3539 pipe_ctx->plane_state->triplebuffer_flips = true; 3540 } 3541 } 3542 if (update_type == UPDATE_TYPE_FULL) { 3543 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3544 plane_state->flip_immediate = false; 3545 } 3546 } 3547 } 3548 3549 // Update Type FULL, Surface updates 3550 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3551 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3552 3553 if (!pipe_ctx->top_pipe && 3554 !pipe_ctx->prev_odm_pipe && 3555 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3556 struct dc_stream_status *stream_status = NULL; 3557 3558 if (!pipe_ctx->plane_state) 3559 continue; 3560 3561 /* Full fe update*/ 3562 if (update_type == UPDATE_TYPE_FAST) 3563 continue; 3564 3565 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3566 3567 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3568 /*turn off triple buffer for full update*/ 3569 dc->hwss.program_triplebuffer( 3570 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3571 } 3572 stream_status = 3573 stream_get_status(context, pipe_ctx->stream); 3574 3575 if (dc->hwss.apply_ctx_for_surface) 3576 dc->hwss.apply_ctx_for_surface( 3577 dc, pipe_ctx->stream, stream_status->plane_count, context); 3578 } 3579 } 3580 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3581 dc->hwss.program_front_end_for_ctx(dc, context); 3582 if (dc->debug.validate_dml_output) { 3583 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3584 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3585 if (cur_pipe->stream == NULL) 3586 continue; 3587 3588 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3589 cur_pipe->plane_res.hubp, dc->ctx, 3590 &context->res_ctx.pipe_ctx[i].rq_regs, 3591 &context->res_ctx.pipe_ctx[i].dlg_regs, 3592 &context->res_ctx.pipe_ctx[i].ttu_regs); 3593 } 3594 } 3595 } 3596 3597 // Update Type FAST, Surface updates 3598 if (update_type == UPDATE_TYPE_FAST) { 3599 if (dc->hwss.set_flip_control_gsl) 3600 for (i = 0; i < surface_count; i++) { 3601 struct dc_plane_state *plane_state = srf_updates[i].surface; 3602 3603 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3604 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3605 3606 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3607 continue; 3608 3609 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3610 continue; 3611 3612 // GSL has to be used for flip immediate 3613 dc->hwss.set_flip_control_gsl(pipe_ctx, 3614 pipe_ctx->plane_state->flip_immediate); 3615 } 3616 } 3617 3618 /* Perform requested Updates */ 3619 for (i = 
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
					continue;

				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;

				/* program triple buffer after lock based on flip type */
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/* only enable triplebuffer for fast_update */
					dc->hwss.program_triplebuffer(
							dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
				}
				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, false);
	} else {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);

			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
						false,
						&hw_locks,
						&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
						top_pipe_to_program->stream_res.tg);
		}

	if (subvp_curr_use) {
		/* If enabling subvp or transitioning from subvp->subvp, enable the
		 * phantom streams before we program front end for the phantom pipes.
		 */
		if (update_type != UPDATE_TYPE_FAST) {
			if (dc->hwss.enable_phantom_streams)
				dc->hwss.enable_phantom_streams(dc, context);
		}
	}

	if (subvp_prev_use && !subvp_curr_use) {
		/* If disabling subvp, disable phantom streams after front end
		 * programming has completed (we turn on phantom OTG in order
		 * to complete the plane disable for phantom pipes).
		 */
		dc->hwss.apply_ctx_to_hw(dc, context);
	}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	if (update_type != UPDATE_TYPE_FAST)
		if (dc->hwss.commit_subvp_config)
			dc->hwss.commit_subvp_config(dc, context);

	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
	 * move the SubVP lock to after the phantom pipes have been setup
	 */
	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
	} else {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
				pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}
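
/*
 * The tail of commit_planes_for_stream() above follows a fixed order
 * (summarized here for reference; the code is authoritative):
 *
 *	lock pipes -> stream updates -> surface programming (FULL) ->
 *	flips (FAST) -> unlock pipes -> post-unlock front end ->
 *	commit SubVP config -> release SubVP/DMCUB lock ->
 *	fire manual trigger on flipped bottom planes
 *
 * Reordering these steps risks underflow or tearing, since FreeSync
 * depends on plane address updates being synchronized under the lock.
 */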
/**
 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
 *
 * @dc: Used to get the current state status
 * @stream: Target stream whose attached planes may be removed
 * @surface_count: Number of surface updates
 * @is_plane_addition: [out] Filled with true if it is a plane addition case
 *
 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
 * the MPO if used simultaneously in some specific configurations (e.g.,
 * 4k@144). This function checks if the incoming context requires applying a
 * transition state with unnecessary pipe splitting and ODM disabled to work
 * around this hardware limitation. If the OPP associated with an MPCC might
 * change due to plane additions, this function returns true.
 *
 * Return:
 * Return true if OPP and MPCC might change, otherwise, return false.
 */
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
		struct dc_stream_state *stream,
		int surface_count,
		bool *is_plane_addition)
{
	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	bool force_minimal_pipe_splitting = false;
	bool subvp_active = false;
	uint32_t i;

	*is_plane_addition = false;

	if (cur_stream_status &&
			dc->current_state->stream_count > 0 &&
			dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		/* determine if minimal transition is required due to MPC */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	if (cur_stream_status &&
			dc->current_state->stream_count == 1 &&
			dc->debug.enable_single_display_2to1_odm_policy) {
		/* determine if minimal transition is required due to dynamic ODM */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
			subvp_active = true;
			break;
		}
	}

	/* For SubVP when adding or removing planes we need to add a minimal transition
	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
	 * must use the minimal transition path to disable the pipe correctly.
	 *
	 * We want to use the minimal transition whenever subvp is active, not only if
	 * a plane is being added / removed from a subvp stream (an MPO plane can be
	 * added to a DRR pipe of a SubVP + DRR config, in which case we still want to
	 * run through a minimal transition to disable subvp).
	 */
	if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP */
		if (cur_stream_status->plane_count > surface_count) {
			force_minimal_pipe_splitting = true;
		} else if (cur_stream_status->plane_count < surface_count) {
			force_minimal_pipe_splitting = true;
			*is_plane_addition = true;
		}
	}

	return force_minimal_pipe_splitting;
}
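
/*
 * Illustrative caller pattern (this mirrors dc_update_planes_and_stream()
 * further below; variable names here are placeholders):
 *
 *	bool is_plane_addition;
 *	bool needs_minimal = could_mpcc_tree_change_for_active_pipes(
 *			dc, stream, surface_count, &is_plane_addition);
 *
 *	// plane addition: commit the minimal (current) state first
 *	// plane removal: commit the minimal (new) state after validation
 */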
/**
 * commit_minimal_transition_state - Create a transition pipe split state
 *
 * @dc: Used to get the current state status
 * @transition_base_context: New transition state
 *
 * In some specific configurations, such as pipe split on multi-display with
 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
 * programming when moving to new planes. To mitigate those types of problems,
 * this function adds a transition state that minimizes pipe usage before
 * programming the new configuration. When adding a new plane, the current
 * state is the one that uses the fewest pipes, so it is applied without
 * splitting; when removing a plane, the new state is the minimal one, so
 * that one is applied without splitting.
 *
 * Return:
 * Return false if something is wrong in the transition state.
 */
static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context)
{
	struct dc_state *transition_context = dc_create_state(dc);
	enum pipe_split_policy tmp_mpc_policy;
	bool temp_dynamic_odm_policy;
	bool temp_subvp_policy;
	enum dc_status ret = DC_ERROR_UNEXPECTED;
	unsigned int i, j;
	unsigned int pipe_in_use = 0;
	bool subvp_in_use = false;

	if (!transition_context)
		return false;

	/* Setup:
	 * Store the current ODM and MPC config in some temp variables to be
	 * restored after we commit the transition state.
	 */

	/* check current pipes in use */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state)
			pipe_in_use++;
	}

	/* If SubVP is enabled and we are adding or removing planes from any main subvp
	 * pipe, we must use the minimal transition.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			subvp_in_use = true;
			break;
		}
	}

	/* When the OS adds a new surface while all pipes are already consumed by
	 * ODM combine or MPC split, we must go through commit_minimal_transition_state
	 * to transition safely. The same applies when the OS exits MPO and we go back
	 * to using ODM/MPC split across all pipes. Otherwise, return true to skip the
	 * transition.
	 *
	 * This reduces how often dc_commit_state_no_check is needed during flips,
	 * especially for MPO enter/exit while DCN still has enough resources.
	 */
	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
		dc_release_state(transition_context);
		return true;
	}

	if (!dc->config.is_vmin_only_asic) {
		tmp_mpc_policy = dc->debug.pipe_split_policy;
		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
	dc->debug.enable_single_display_2to1_odm_policy = false;

	temp_subvp_policy = dc->debug.force_disable_subvp;
	dc->debug.force_disable_subvp = true;

	dc_resource_state_copy_construct(transition_base_context, transition_context);

	/* commit minimal state */
	if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
		for (i = 0; i < transition_context->stream_count; i++) {
			struct dc_stream_status *stream_status = &transition_context->stream_status[i];

			for (j = 0; j < stream_status->plane_count; j++) {
				struct dc_plane_state *plane_state = stream_status->plane_states[j];

				/* force vsync flip when reconfiguring pipes to prevent underflow
				 * and corruption
				 */
				plane_state->flip_immediate = false;
			}
		}

		ret = dc_commit_state_no_check(dc, transition_context);
	}

	/* always release as dc_commit_state_no_check retains in good case */
	dc_release_state(transition_context);

	/* TearDown:
	 * Restore original configuration for ODM and MPO.
	 */
	if (!dc->config.is_vmin_only_asic)
		dc->debug.pipe_split_policy = tmp_mpc_policy;

	dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
	dc->debug.force_disable_subvp = temp_subvp_policy;

	if (ret != DC_OK) {
		/* this should never happen */
		BREAK_TO_DEBUGGER();
		return false;
	}

	/* force full surface update */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
		}
	}

	return true;
}

bool dc_update_planes_and_stream(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update)
{
	struct dc_state *context;
	enum surface_update_type update_type;
	int i;
	struct mall_temp_config mall_temp_config;

	/* In cases where MPO and split or ODM are used transitions can
	 * cause underflow. Apply stream configuration with minimal pipe
	 * split first to avoid unsupported transitions for active pipes.
	 */
	bool force_minimal_pipe_splitting;
	bool is_plane_addition;

	force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
			dc,
			stream,
			surface_count,
			&is_plane_addition);

	/* on plane addition, minimal state is the current one */
	if (force_minimal_pipe_splitting && is_plane_addition &&
			!commit_minimal_transition_state(dc, dc->current_state))
		return false;

	if (!update_planes_and_stream_state(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			&update_type,
			&context))
		return false;

	/* on plane removal, minimal state is the new one */
	if (force_minimal_pipe_splitting && !is_plane_addition) {
		/* Since all phantom pipes are removed in full validation,
		 * we have to save and restore the subvp/mall config when
		 * we do a minimal transition since the flags marking the
		 * pipe as subvp/phantom will be cleared (dc copy constructor
		 * creates a shallow copy).
		 */
		if (dc->res_pool->funcs->save_mall_state)
			dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
		if (!commit_minimal_transition_state(dc, context)) {
			dc_release_state(context);
			return false;
		}
		if (dc->res_pool->funcs->restore_mall_state)
			dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);

		/* If we do a minimal transition with plane removal and the context
		 * has subvp we also have to retain back the phantom stream / planes
		 * since the refcount is decremented as part of the min transition
		 * (we commit a state with no subvp, so the phantom streams / planes
		 * had to be removed).
		 */
		if (dc->res_pool->funcs->retain_phantom_pipes)
			dc->res_pool->funcs->retain_phantom_pipes(dc, context);
		update_type = UPDATE_TYPE_FULL;
	}

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);
	if (dc->current_state != context) {

		/* Since memory free requires elevated IRQL, an interrupt
		 * request is generated by mem free. If this happens
		 * between freeing and reassigning the context, our vsync
		 * interrupt will call into dc and cause a memory
		 * corruption BSOD. Hence, we first reassign the context,
		 * then free the old context.
		 */

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		// clear any forced full updates
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	return true;
}
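
/*
 * Illustrative DM-side call (names like "updates" are placeholders, not
 * part of this file):
 *
 *	struct dc_surface_update updates[MAX_SURFACES] = { 0 };
 *
 *	// fill updates[i].surface and the per-plane update pointers
 *	if (!dc_update_planes_and_stream(dc, updates, surface_count,
 *					 stream, stream_update))
 *		return;	// state unchanged on failure
 *
 * On success dc->current_state has been swapped to the new context and
 * the old state released, as shown above.
 */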
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	/* TODO: Since change commit sequence can have a huge impact,
	 * we decided to only enable it for DCN3x. However, as soon as
	 * we get more confident about this change we'll need to enable
	 * the new sequence for all ASICs.
	 */
	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
		dc_update_planes_and_stream(dc, srf_updates,
				surface_count, stream,
				stream_update);
		return;
	}

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 *
		 * Only relevant for DCN behavior where we can guarantee the optimization
		 * is safe to apply - retain the legacy behavior for DCE.
		 */
		dc_post_update_surfaces_to_stream(dc);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
						&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* Legacy optimization path for DCE. */
	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
		dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}
}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}
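
/*
 * Typical interrupt flow (illustrative; src_id/ext_id come from the DM's
 * interrupt registration, e.g. a HPD source):
 *
 *	enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	if (dc_interrupt_set(dc, src, true)) {
 *		// ... handle the interrupt when it fires ...
 *		dc_interrupt_ack(dc, src);
 *	}
 */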
void dc_power_down_on_boot(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
			dc->hwss.power_down_on_boot)
		dc->hwss.power_down_on_boot(dc);
}

void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	if (!dc->current_state)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc_z10_restore(dc);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
				dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}

void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

bool dc_is_oem_i2c_device_present(
		struct dc *dc,
		size_t slave_address)
{
	if (dc->res_pool->oem_device)
		return dce_i2c_oem_device_present(
				dc->res_pool,
				dc->res_pool->oem_device,
				slave_address);

	return false;
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{
	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dce_i2c_submit_command(
			dc->res_pool,
			ddc->ddc_pin,
			cmd);
}

bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;

	if (ddc)
		return dce_i2c_submit_command(
				dc->res_pool,
				ddc->ddc_pin,
				cmd);

	return false;
}
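
/*
 * Sketch of an OEM I2C write (field names are assumed to match struct
 * i2c_command / struct i2c_payload in dc_ddc_types.h; the address and
 * payload values are purely illustrative):
 *
 *	uint8_t data[2] = { 0x00, 0x01 };
 *	struct i2c_payload payload = {
 *		.write = true, .address = 0x48, .length = 2, .data = data };
 *	struct i2c_command cmd = {
 *		.payloads = &payload, .number_of_payloads = 1,
 *		.engine = I2C_COMMAND_ENGINE_DEFAULT, .speed = 100 };
 *
 *	if (!dc_submit_i2c_oem(dc, &cmd))
 *		; // no OEM device present, or the transfer failed
 */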
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/*
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat the device as having no EDID if parsing fails.
	 */
	if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status %d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}

/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}
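
/*
 * Illustrative use from a DM power path (the surrounding reset flow is
 * hypothetical, not defined in this file):
 *
 *	dc_set_psr_allow_active(dc, false);	// exit PSR before reprogramming
 *	// ... reset / reprogram the links ...
 *	dc_set_psr_allow_active(dc, true);	// let eDP links re-enter PSR
 *
 * The call returns false as soon as any link fails to change PSR state.
 */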
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}
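
/*
 * dc_lock_memory_clock_frequency() and dc_unlock_memory_clock_frequency()
 * above are expected to be used as a pair (illustrative; the exact caller
 * and timing are DM policy):
 *
 *	dc_lock_memory_clock_frequency(dc);	// min = current mode's minimum, max = max DPM
 *	// ... period where memclk must not drop below the mode requirement ...
 *	dc_unlock_memory_clock_frequency(dc);	// min = lowest DPM, max = max DPM
 */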
/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: True = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode. The DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through the DM interface, or manually by escape
 * calls. The caller needs to hold a DC lock when doing so.
 *
 * Return: none (void function)
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
	unsigned int softMax, maxDPM, funcMin;
	bool p_state_change_support;

	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}

bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB FW supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications. If supported, DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
			dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
			!dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
			!dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
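
/*
 * Expected DM bring-up order (illustrative; register_outbox_irq() is a
 * hypothetical DM-side helper, not part of this file):
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		register_outbox_irq(adev);	// hook up the IRQ handler first
 *		dc_enable_dmub_outbox(dc);	// then unmask notifications
 *	}
 */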
/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 *
 * Submits the aux command to dmub via inbox message and sets the port index
 * appropriately for legacy DDC.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
				payload->data,
				payload->length);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}
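
/*
 * Sketch of a native-AUX DPCD read request (the reply arrives later via a
 * DMUB outbox notification; field names are assumed to match struct
 * aux_payload, and the address is illustrative):
 *
 *	uint8_t buf[1];
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,	// native AUX -> DP_AUX_REQ_ACTION_DPCD_READ
 *		.write = false,
 *		.address = 0x0000,	// DPCD_REV
 *		.length = 1,
 *		.data = buf,
 *	};
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */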
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
		uint32_t link_index,
		struct set_config_cmd_payload *payload,
		struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
		uint32_t link_index,
		uint8_t mst_alloc_slots,
		uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, if ret_status is 1 */
	if (cmd.set_config_access.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
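
/*
 * Interpreting the result (illustrative; req_slots is a placeholder):
 *
 *	uint8_t slots_in_use;
 *	enum dc_status status = dc_process_dmub_set_mst_slots(
 *			dc, link_index, req_slots, &slots_in_use);
 *
 *	if (status == DC_FAIL_UNSUPPORTED_1)
 *		; // MST not enabled on this DPIA
 *	else if (status == DC_NOT_SUPPORTED)
 *		; // allocation mismatch; slots_in_use holds the current count
 */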
/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
		uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}

/**
 * dc_extended_blank_supported - Decide whether extended blank is supported
 *
 * @dc: [in] Current DC state
 *
 * Extended blank is a freesync optimization feature to be enabled in the
 * future. During the extra vblank period gained from freesync, we have the
 * ability to enter z9/z10.
 *
 * Return:
 * Indicate whether extended blank is supported (%true or %false)
 */
bool dc_extended_blank_supported(struct dc *dc)
{
	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
		&& dc->caps.zstate_support && dc->caps.is_apu;
}
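
/*
 * Illustrative DM vblank hook (hypothetical caller, not part of this file):
 * forward the vsync interrupt enable state so DMUB can pause/resume ABM
 * once steady state is reached:
 *
 *	static void dm_set_vblank(struct dc *dc, struct dc_stream_state *stream,
 *				  bool enable)
 *	{
 *		dc_notify_vsync_int_state(dc, stream, enable);
 *	}
 */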