1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 */ 24 25 #include "dm_services.h" 26 27 #include "dc.h" 28 29 #include "core_status.h" 30 #include "core_types.h" 31 #include "hw_sequencer.h" 32 #include "dce/dce_hwseq.h" 33 34 #include "resource.h" 35 36 #include "gpio_service_interface.h" 37 #include "clk_mgr.h" 38 #include "clock_source.h" 39 #include "dc_bios_types.h" 40 41 #include "bios_parser_interface.h" 42 #include "bios/bios_parser_helper.h" 43 #include "include/irq_service_interface.h" 44 #include "transform.h" 45 #include "dmcu.h" 46 #include "dpp.h" 47 #include "timing_generator.h" 48 #include "abm.h" 49 #include "virtual/virtual_link_encoder.h" 50 #include "hubp.h" 51 52 #include "link_hwss.h" 53 #include "link_encoder.h" 54 #include "link_enc_cfg.h" 55 56 #include "link.h" 57 #include "dm_helpers.h" 58 #include "mem_input.h" 59 60 #include "dc_dmub_srv.h" 61 62 #include "dsc.h" 63 64 #include "vm_helper.h" 65 66 #include "dce/dce_i2c.h" 67 68 #include "dmub/dmub_srv.h" 69 70 #include "dce/dmub_psr.h" 71 72 #include "dce/dmub_hw_lock_mgr.h" 73 74 #include "dc_trace.h" 75 76 #include "hw_sequencer_private.h" 77 78 #include "dce/dmub_outbox.h" 79 80 #define CTX \ 81 dc->ctx 82 83 #define DC_LOGGER \ 84 dc->ctx->logger 85 86 static const char DC_BUILD_ID[] = "production-build"; 87 88 /** 89 * DOC: Overview 90 * 91 * DC is the OS-agnostic component of the amdgpu DC driver. 92 * 93 * DC maintains and validates a set of structs representing the state of the 94 * driver and writes that state to AMD hardware 95 * 96 * Main DC HW structs: 97 * 98 * struct dc - The central struct. One per driver. Created on driver load, 99 * destroyed on driver unload. 100 * 101 * struct dc_context - One per driver. 102 * Used as a backpointer by most other structs in dc. 103 * 104 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 105 * plugpoints). Created on driver load, destroyed on driver unload. 106 * 107 * struct dc_sink - One per display. Created on boot or hotplug. 108 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 109 * (the display directly attached). It may also have one or more remote 110 * sinks (in the Multi-Stream Transport case) 111 * 112 * struct resource_pool - One per driver. Represents the hw blocks not in the 113 * main pipeline. Not directly accessible by dm. 114 * 115 * Main dc state structs: 116 * 117 * These structs can be created and destroyed as needed. There is a full set of 118 * these structs in dc->current_state representing the currently programmed state. 119 * 120 * struct dc_state - The global DC state to track global state information, 121 * such as bandwidth values. 122 * 123 * struct dc_stream_state - Represents the hw configuration for the pipeline from 124 * a framebuffer to a display. Maps one-to-one with dc_sink. 125 * 126 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, 127 * and may have more in the Multi-Plane Overlay case. 128 * 129 * struct resource_context - Represents the programmable state of everything in 130 * the resource_pool. Not directly accessible by dm. 131 * 132 * struct pipe_ctx - A member of struct resource_context. Represents the 133 * internal hardware pipeline components. Each dc_plane_state has either 134 * one or two (in the pipe-split case). 135 */ 136 137 /* Private functions */ 138 139 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) 140 { 141 if (new > *original) 142 *original = new; 143 } 144 145 static void destroy_links(struct dc *dc) 146 { 147 uint32_t i; 148 149 for (i = 0; i < dc->link_count; i++) { 150 if (NULL != dc->links[i]) 151 dc->link_srv->destroy_link(&dc->links[i]); 152 } 153 } 154 155 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 156 { 157 int i; 158 uint32_t count = 0; 159 160 for (i = 0; i < num_links; i++) { 161 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 162 links[i]->is_internal_display) 163 count++; 164 } 165 166 return count; 167 } 168 169 static int get_seamless_boot_stream_count(struct dc_state *ctx) 170 { 171 uint8_t i; 172 uint8_t seamless_boot_stream_count = 0; 173 174 for (i = 0; i < ctx->stream_count; i++) 175 if (ctx->streams[i]->apply_seamless_boot_optimization) 176 seamless_boot_stream_count++; 177 178 return seamless_boot_stream_count; 179 } 180 181 static bool create_links( 182 struct dc *dc, 183 uint32_t num_virtual_links) 184 { 185 int i; 186 int connectors_num; 187 struct dc_bios *bios = dc->ctx->dc_bios; 188 189 dc->link_count = 0; 190 191 connectors_num = bios->funcs->get_connectors_number(bios); 192 193 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 194 195 if (connectors_num > ENUM_ID_COUNT) { 196 dm_error( 197 "DC: Number of connectors %d exceeds maximum of %d!\n", 198 connectors_num, 199 ENUM_ID_COUNT); 200 return false; 201 } 202 203 dm_output_to_console( 204 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 205 __func__, 206 connectors_num, 207 num_virtual_links); 208 209 for (i = 0; i < connectors_num; i++) { 210 struct link_init_data link_init_params = {0}; 211 struct dc_link *link; 212 213 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 214 215 link_init_params.ctx = dc->ctx; 216 /* next BIOS object table connector */ 217 link_init_params.connector_index = i; 218 link_init_params.link_index = dc->link_count; 219 link_init_params.dc = dc; 220 link = dc->link_srv->create_link(&link_init_params); 221 222 if (link) { 223 dc->links[dc->link_count] = link; 224 link->dc = dc; 225 ++dc->link_count; 226 } 227 } 228 229 DC_LOG_DC("BIOS object table - end"); 230 231 /* Create a link for each usb4 dpia port */ 232 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { 233 struct link_init_data link_init_params = {0}; 234 struct dc_link *link; 235 236 link_init_params.ctx = dc->ctx; 237 link_init_params.connector_index = i; 238 link_init_params.link_index = dc->link_count; 239 link_init_params.dc = dc; 240 link_init_params.is_dpia_link = true; 241 242 link = dc->link_srv->create_link(&link_init_params); 243 if (link) { 244 dc->links[dc->link_count] = link; 245 link->dc = dc; 246 ++dc->link_count; 247 } 248 } 249 250 for (i = 0; i < num_virtual_links; i++) { 251 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 252 struct encoder_init_data enc_init = {0}; 253 254 if (link == NULL) { 255 BREAK_TO_DEBUGGER(); 256 goto failed_alloc; 257 } 258 259 link->link_index = dc->link_count; 260 dc->links[dc->link_count] = link; 261 dc->link_count++; 262 263 link->ctx = dc->ctx; 264 link->dc = dc; 265 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 266 link->link_id.type = OBJECT_TYPE_CONNECTOR; 267 link->link_id.id = CONNECTOR_ID_VIRTUAL; 268 link->link_id.enum_id = ENUM_ID_1; 269 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 270 271 if (!link->link_enc) { 272 BREAK_TO_DEBUGGER(); 273 goto failed_alloc; 274 } 275 276 link->link_status.dpcd_caps = &link->dpcd_caps; 277 278 enc_init.ctx = dc->ctx; 279 enc_init.channel = CHANNEL_ID_UNKNOWN; 280 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 281 enc_init.transmitter = TRANSMITTER_UNKNOWN; 282 enc_init.connector = link->link_id; 283 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 284 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 285 enc_init.encoder.enum_id = ENUM_ID_1; 286 virtual_link_encoder_construct(link->link_enc, &enc_init); 287 } 288 289 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 290 291 return true; 292 293 failed_alloc: 294 return false; 295 } 296 297 /* Create additional DIG link encoder objects if fewer than the platform 298 * supports were created during link construction. This can happen if the 299 * number of physical connectors is less than the number of DIGs. 300 */ 301 static bool create_link_encoders(struct dc *dc) 302 { 303 bool res = true; 304 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 305 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 306 int i; 307 308 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 309 * link encoders and physical display endpoints and does not require 310 * additional link encoder objects. 311 */ 312 if (num_usb4_dpia == 0) 313 return res; 314 315 /* Create as many link encoder objects as the platform supports. DPIA 316 * endpoints can be programmably mapped to any DIG. 317 */ 318 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { 319 for (i = 0; i < num_dig_link_enc; i++) { 320 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 321 322 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { 323 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, 324 (enum engine_id)(ENGINE_ID_DIGA + i)); 325 if (link_enc) { 326 dc->res_pool->link_encoders[i] = link_enc; 327 dc->res_pool->dig_link_enc_count++; 328 } else { 329 res = false; 330 } 331 } 332 } 333 } 334 335 return res; 336 } 337 338 /* Destroy any additional DIG link encoder objects created by 339 * create_link_encoders(). 340 * NB: Must only be called after destroy_links(). 341 */ 342 static void destroy_link_encoders(struct dc *dc) 343 { 344 unsigned int num_usb4_dpia; 345 unsigned int num_dig_link_enc; 346 int i; 347 348 if (!dc->res_pool) 349 return; 350 351 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 352 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 353 354 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 355 * link encoders and physical display endpoints and does not require 356 * additional link encoder objects. 357 */ 358 if (num_usb4_dpia == 0) 359 return; 360 361 for (i = 0; i < num_dig_link_enc; i++) { 362 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 363 364 if (link_enc) { 365 link_enc->funcs->destroy(&link_enc); 366 dc->res_pool->link_encoders[i] = NULL; 367 dc->res_pool->dig_link_enc_count--; 368 } 369 } 370 } 371 372 static struct dc_perf_trace *dc_perf_trace_create(void) 373 { 374 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); 375 } 376 377 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) 378 { 379 kfree(*perf_trace); 380 *perf_trace = NULL; 381 } 382 383 /** 384 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR 385 * @dc: dc reference 386 * @stream: Initial dc stream state 387 * @adjust: Updated parameters for vertical_total_min and vertical_total_max 388 * 389 * Looks up the pipe context of dc_stream_state and updates the 390 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 391 * Rate, which is a power-saving feature that targets reducing panel 392 * refresh rate while the screen is static 393 * 394 * Return: %true if the pipe context is found and adjusted; 395 * %false if the pipe context is not found. 396 */ 397 bool dc_stream_adjust_vmin_vmax(struct dc *dc, 398 struct dc_stream_state *stream, 399 struct dc_crtc_timing_adjust *adjust) 400 { 401 int i; 402 403 /* 404 * Don't adjust DRR while there's bandwidth optimizations pending to 405 * avoid conflicting with firmware updates. 406 */ 407 if (dc->ctx->dce_version > DCE_VERSION_MAX) 408 if (dc->optimized_required || dc->wm_optimized_required) 409 return false; 410 411 stream->adjust.v_total_max = adjust->v_total_max; 412 stream->adjust.v_total_mid = adjust->v_total_mid; 413 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; 414 stream->adjust.v_total_min = adjust->v_total_min; 415 416 for (i = 0; i < MAX_PIPES; i++) { 417 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 418 419 if (pipe->stream == stream && pipe->stream_res.tg) { 420 dc->hwss.set_drr(&pipe, 421 1, 422 *adjust); 423 424 return true; 425 } 426 } 427 return false; 428 } 429 430 /** 431 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of 432 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate) 433 * 434 * @dc: [in] dc reference 435 * @stream: [in] Initial dc stream state 436 * @refresh_rate: [in] new refresh_rate 437 * 438 * Return: %true if the pipe context is found and there is an associated 439 * timing_generator for the DC; 440 * %false if the pipe context is not found or there is no 441 * timing_generator for the DC. 442 */ 443 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 444 struct dc_stream_state *stream, 445 uint32_t *refresh_rate) 446 { 447 bool status = false; 448 449 int i = 0; 450 451 for (i = 0; i < MAX_PIPES; i++) { 452 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 453 454 if (pipe->stream == stream && pipe->stream_res.tg) { 455 /* Only execute if a function pointer has been defined for 456 * the DC version in question 457 */ 458 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { 459 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); 460 461 status = true; 462 463 break; 464 } 465 } 466 } 467 468 return status; 469 } 470 471 bool dc_stream_get_crtc_position(struct dc *dc, 472 struct dc_stream_state **streams, int num_streams, 473 unsigned int *v_pos, unsigned int *nom_v_pos) 474 { 475 /* TODO: Support multiple streams */ 476 const struct dc_stream_state *stream = streams[0]; 477 int i; 478 bool ret = false; 479 struct crtc_position position; 480 481 for (i = 0; i < MAX_PIPES; i++) { 482 struct pipe_ctx *pipe = 483 &dc->current_state->res_ctx.pipe_ctx[i]; 484 485 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 486 dc->hwss.get_position(&pipe, 1, &position); 487 488 *v_pos = position.vertical_count; 489 *nom_v_pos = position.nominal_vcount; 490 ret = true; 491 } 492 } 493 return ret; 494 } 495 496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 497 static inline void 498 dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv, 499 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 500 { 501 union dmub_rb_cmd cmd = {0}; 502 503 cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num; 504 cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num; 505 506 if (is_stop) { 507 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 508 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE; 509 } else { 510 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY; 511 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY; 512 cmd.secure_display.roi_info.x_start = rect->x; 513 cmd.secure_display.roi_info.y_start = rect->y; 514 cmd.secure_display.roi_info.x_end = rect->x + rect->width; 515 cmd.secure_display.roi_info.y_end = rect->y + rect->height; 516 } 517 518 dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 519 } 520 521 static inline void 522 dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu, 523 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop) 524 { 525 if (is_stop) 526 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); 527 else 528 dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping); 529 } 530 531 bool 532 dc_stream_forward_crc_window(struct dc_stream_state *stream, 533 struct rect *rect, bool is_stop) 534 { 535 struct dmcu *dmcu; 536 struct dc_dmub_srv *dmub_srv; 537 struct otg_phy_mux mux_mapping; 538 struct pipe_ctx *pipe; 539 int i; 540 struct dc *dc = stream->ctx->dc; 541 542 for (i = 0; i < MAX_PIPES; i++) { 543 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 544 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 545 break; 546 } 547 548 /* Stream not found */ 549 if (i == MAX_PIPES) 550 return false; 551 552 mux_mapping.phy_output_num = stream->link->link_enc_hw_inst; 553 mux_mapping.otg_output_num = pipe->stream_res.tg->inst; 554 555 dmcu = dc->res_pool->dmcu; 556 dmub_srv = dc->ctx->dmub_srv; 557 558 /* forward to dmub */ 559 if (dmub_srv) 560 dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop); 561 /* forward to dmcu */ 562 else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) 563 dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop); 564 else 565 return false; 566 567 return true; 568 } 569 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 570 571 /** 572 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 573 * @dc: DC Object 574 * @stream: The stream to configure CRC on. 575 * @enable: Enable CRC if true, disable otherwise. 576 * @crc_window: CRC window (x/y start/end) information 577 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 578 * once. 579 * 580 * By default, only CRC0 is configured, and the entire frame is used to 581 * calculate the CRC. 582 * 583 * Return: %false if the stream is not found or CRC capture is not supported; 584 * %true if the stream has been configured. 585 */ 586 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 587 struct crc_params *crc_window, bool enable, bool continuous) 588 { 589 struct pipe_ctx *pipe; 590 struct crc_params param; 591 struct timing_generator *tg; 592 593 pipe = resource_get_otg_master_for_stream( 594 &dc->current_state->res_ctx, stream); 595 596 /* Stream not found */ 597 if (pipe == NULL) 598 return false; 599 600 /* By default, capture the full frame */ 601 param.windowa_x_start = 0; 602 param.windowa_y_start = 0; 603 param.windowa_x_end = pipe->stream->timing.h_addressable; 604 param.windowa_y_end = pipe->stream->timing.v_addressable; 605 param.windowb_x_start = 0; 606 param.windowb_y_start = 0; 607 param.windowb_x_end = pipe->stream->timing.h_addressable; 608 param.windowb_y_end = pipe->stream->timing.v_addressable; 609 610 if (crc_window) { 611 param.windowa_x_start = crc_window->windowa_x_start; 612 param.windowa_y_start = crc_window->windowa_y_start; 613 param.windowa_x_end = crc_window->windowa_x_end; 614 param.windowa_y_end = crc_window->windowa_y_end; 615 param.windowb_x_start = crc_window->windowb_x_start; 616 param.windowb_y_start = crc_window->windowb_y_start; 617 param.windowb_x_end = crc_window->windowb_x_end; 618 param.windowb_y_end = crc_window->windowb_y_end; 619 } 620 621 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 622 param.odm_mode = pipe->next_odm_pipe ? 1:0; 623 624 /* Default to the union of both windows */ 625 param.selection = UNION_WINDOW_A_B; 626 param.continuous_mode = continuous; 627 param.enable = enable; 628 629 tg = pipe->stream_res.tg; 630 631 /* Only call if supported */ 632 if (tg->funcs->configure_crc) 633 return tg->funcs->configure_crc(tg, ¶m); 634 DC_LOG_WARNING("CRC capture not supported."); 635 return false; 636 } 637 638 /** 639 * dc_stream_get_crc() - Get CRC values for the given stream. 640 * 641 * @dc: DC object. 642 * @stream: The DC stream state of the stream to get CRCs from. 643 * @r_cr: CRC value for the red component. 644 * @g_y: CRC value for the green component. 645 * @b_cb: CRC value for the blue component. 646 * 647 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 648 * 649 * Return: 650 * %false if stream is not found, or if CRCs are not enabled. 651 */ 652 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 653 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 654 { 655 int i; 656 struct pipe_ctx *pipe; 657 struct timing_generator *tg; 658 659 for (i = 0; i < MAX_PIPES; i++) { 660 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 661 if (pipe->stream == stream) 662 break; 663 } 664 /* Stream not found */ 665 if (i == MAX_PIPES) 666 return false; 667 668 tg = pipe->stream_res.tg; 669 670 if (tg->funcs->get_crc) 671 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 672 DC_LOG_WARNING("CRC capture not supported."); 673 return false; 674 } 675 676 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 677 enum dc_dynamic_expansion option) 678 { 679 /* OPP FMT dyn expansion updates*/ 680 int i; 681 struct pipe_ctx *pipe_ctx; 682 683 for (i = 0; i < MAX_PIPES; i++) { 684 if (dc->current_state->res_ctx.pipe_ctx[i].stream 685 == stream) { 686 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 687 pipe_ctx->stream_res.opp->dyn_expansion = option; 688 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 689 pipe_ctx->stream_res.opp, 690 COLOR_SPACE_YCBCR601, 691 stream->timing.display_color_depth, 692 stream->signal); 693 } 694 } 695 } 696 697 void dc_stream_set_dither_option(struct dc_stream_state *stream, 698 enum dc_dither_option option) 699 { 700 struct bit_depth_reduction_params params; 701 struct dc_link *link = stream->link; 702 struct pipe_ctx *pipes = NULL; 703 int i; 704 705 for (i = 0; i < MAX_PIPES; i++) { 706 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 707 stream) { 708 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 709 break; 710 } 711 } 712 713 if (!pipes) 714 return; 715 if (option > DITHER_OPTION_MAX) 716 return; 717 718 stream->dither_option = option; 719 720 memset(¶ms, 0, sizeof(params)); 721 resource_build_bit_depth_reduction_params(stream, ¶ms); 722 stream->bit_depth_params = params; 723 724 if (pipes->plane_res.xfm && 725 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 726 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 727 pipes->plane_res.xfm, 728 pipes->plane_res.scl_data.lb_params.depth, 729 &stream->bit_depth_params); 730 } 731 732 pipes->stream_res.opp->funcs-> 733 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 734 } 735 736 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 737 { 738 int i; 739 bool ret = false; 740 struct pipe_ctx *pipes; 741 742 for (i = 0; i < MAX_PIPES; i++) { 743 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 744 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 745 dc->hwss.program_gamut_remap(pipes); 746 ret = true; 747 } 748 } 749 750 return ret; 751 } 752 753 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 754 { 755 int i; 756 bool ret = false; 757 struct pipe_ctx *pipes; 758 759 for (i = 0; i < MAX_PIPES; i++) { 760 if (dc->current_state->res_ctx.pipe_ctx[i].stream 761 == stream) { 762 763 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 764 dc->hwss.program_output_csc(dc, 765 pipes, 766 stream->output_color_space, 767 stream->csc_color_matrix.matrix, 768 pipes->stream_res.opp->inst); 769 ret = true; 770 } 771 } 772 773 return ret; 774 } 775 776 void dc_stream_set_static_screen_params(struct dc *dc, 777 struct dc_stream_state **streams, 778 int num_streams, 779 const struct dc_static_screen_params *params) 780 { 781 int i, j; 782 struct pipe_ctx *pipes_affected[MAX_PIPES]; 783 int num_pipes_affected = 0; 784 785 for (i = 0; i < num_streams; i++) { 786 struct dc_stream_state *stream = streams[i]; 787 788 for (j = 0; j < MAX_PIPES; j++) { 789 if (dc->current_state->res_ctx.pipe_ctx[j].stream 790 == stream) { 791 pipes_affected[num_pipes_affected++] = 792 &dc->current_state->res_ctx.pipe_ctx[j]; 793 } 794 } 795 } 796 797 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 798 } 799 800 static void dc_destruct(struct dc *dc) 801 { 802 // reset link encoder assignment table on destruct 803 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 804 link_enc_cfg_init(dc, dc->current_state); 805 806 if (dc->current_state) { 807 dc_release_state(dc->current_state); 808 dc->current_state = NULL; 809 } 810 811 destroy_links(dc); 812 813 destroy_link_encoders(dc); 814 815 if (dc->clk_mgr) { 816 dc_destroy_clk_mgr(dc->clk_mgr); 817 dc->clk_mgr = NULL; 818 } 819 820 dc_destroy_resource_pool(dc); 821 822 if (dc->link_srv) 823 link_destroy_link_service(&dc->link_srv); 824 825 if (dc->ctx->gpio_service) 826 dal_gpio_service_destroy(&dc->ctx->gpio_service); 827 828 if (dc->ctx->created_bios) 829 dal_bios_parser_destroy(&dc->ctx->dc_bios); 830 831 dc_perf_trace_destroy(&dc->ctx->perf_trace); 832 833 kfree(dc->ctx); 834 dc->ctx = NULL; 835 836 kfree(dc->bw_vbios); 837 dc->bw_vbios = NULL; 838 839 kfree(dc->bw_dceip); 840 dc->bw_dceip = NULL; 841 842 kfree(dc->dcn_soc); 843 dc->dcn_soc = NULL; 844 845 kfree(dc->dcn_ip); 846 dc->dcn_ip = NULL; 847 848 kfree(dc->vm_helper); 849 dc->vm_helper = NULL; 850 851 } 852 853 static bool dc_construct_ctx(struct dc *dc, 854 const struct dc_init_data *init_params) 855 { 856 struct dc_context *dc_ctx; 857 858 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 859 if (!dc_ctx) 860 return false; 861 862 dc_ctx->cgs_device = init_params->cgs_device; 863 dc_ctx->driver_context = init_params->driver; 864 dc_ctx->dc = dc; 865 dc_ctx->asic_id = init_params->asic_id; 866 dc_ctx->dc_sink_id_count = 0; 867 dc_ctx->dc_stream_id_count = 0; 868 dc_ctx->dce_environment = init_params->dce_environment; 869 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets; 870 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets; 871 872 /* Create logger */ 873 874 dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id); 875 876 dc_ctx->perf_trace = dc_perf_trace_create(); 877 if (!dc_ctx->perf_trace) { 878 kfree(dc_ctx); 879 ASSERT_CRITICAL(false); 880 return false; 881 } 882 883 dc->ctx = dc_ctx; 884 885 dc->link_srv = link_create_link_service(); 886 if (!dc->link_srv) 887 return false; 888 889 return true; 890 } 891 892 static bool dc_construct(struct dc *dc, 893 const struct dc_init_data *init_params) 894 { 895 struct dc_context *dc_ctx; 896 struct bw_calcs_dceip *dc_dceip; 897 struct bw_calcs_vbios *dc_vbios; 898 struct dcn_soc_bounding_box *dcn_soc; 899 struct dcn_ip_params *dcn_ip; 900 901 dc->config = init_params->flags; 902 903 // Allocate memory for the vm_helper 904 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 905 if (!dc->vm_helper) { 906 dm_error("%s: failed to create dc->vm_helper\n", __func__); 907 goto fail; 908 } 909 910 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 911 912 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 913 if (!dc_dceip) { 914 dm_error("%s: failed to create dceip\n", __func__); 915 goto fail; 916 } 917 918 dc->bw_dceip = dc_dceip; 919 920 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 921 if (!dc_vbios) { 922 dm_error("%s: failed to create vbios\n", __func__); 923 goto fail; 924 } 925 926 dc->bw_vbios = dc_vbios; 927 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 928 if (!dcn_soc) { 929 dm_error("%s: failed to create dcn_soc\n", __func__); 930 goto fail; 931 } 932 933 dc->dcn_soc = dcn_soc; 934 935 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 936 if (!dcn_ip) { 937 dm_error("%s: failed to create dcn_ip\n", __func__); 938 goto fail; 939 } 940 941 dc->dcn_ip = dcn_ip; 942 943 if (!dc_construct_ctx(dc, init_params)) { 944 dm_error("%s: failed to create ctx\n", __func__); 945 goto fail; 946 } 947 948 dc_ctx = dc->ctx; 949 950 /* Resource should construct all asic specific resources. 951 * This should be the only place where we need to parse the asic id 952 */ 953 if (init_params->vbios_override) 954 dc_ctx->dc_bios = init_params->vbios_override; 955 else { 956 /* Create BIOS parser */ 957 struct bp_init_data bp_init_data; 958 959 bp_init_data.ctx = dc_ctx; 960 bp_init_data.bios = init_params->asic_id.atombios_base_address; 961 962 dc_ctx->dc_bios = dal_bios_parser_create( 963 &bp_init_data, dc_ctx->dce_version); 964 965 if (!dc_ctx->dc_bios) { 966 ASSERT_CRITICAL(false); 967 goto fail; 968 } 969 970 dc_ctx->created_bios = true; 971 } 972 973 dc->vendor_signature = init_params->vendor_signature; 974 975 /* Create GPIO service */ 976 dc_ctx->gpio_service = dal_gpio_service_create( 977 dc_ctx->dce_version, 978 dc_ctx->dce_environment, 979 dc_ctx); 980 981 if (!dc_ctx->gpio_service) { 982 ASSERT_CRITICAL(false); 983 goto fail; 984 } 985 986 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 987 if (!dc->res_pool) 988 goto fail; 989 990 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 991 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 992 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 993 if (dc->caps.max_optimizable_video_width == 0) 994 dc->caps.max_optimizable_video_width = 5120; 995 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 996 if (!dc->clk_mgr) 997 goto fail; 998 #ifdef CONFIG_DRM_AMD_DC_FP 999 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 1000 1001 if (dc->res_pool->funcs->update_bw_bounding_box) { 1002 DC_FP_START(); 1003 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 1004 DC_FP_END(); 1005 } 1006 #endif 1007 1008 /* Creation of current_state must occur after dc->dml 1009 * is initialized in dc_create_resource_pool because 1010 * on creation it copies the contents of dc->dml 1011 */ 1012 1013 dc->current_state = dc_create_state(dc); 1014 1015 if (!dc->current_state) { 1016 dm_error("%s: failed to create validate ctx\n", __func__); 1017 goto fail; 1018 } 1019 1020 if (!create_links(dc, init_params->num_virtual_links)) 1021 goto fail; 1022 1023 /* Create additional DIG link encoder objects if fewer than the platform 1024 * supports were created during link construction. 1025 */ 1026 if (!create_link_encoders(dc)) 1027 goto fail; 1028 1029 dc_resource_state_construct(dc, dc->current_state); 1030 1031 return true; 1032 1033 fail: 1034 return false; 1035 } 1036 1037 static void disable_all_writeback_pipes_for_stream( 1038 const struct dc *dc, 1039 struct dc_stream_state *stream, 1040 struct dc_state *context) 1041 { 1042 int i; 1043 1044 for (i = 0; i < stream->num_wb_info; i++) 1045 stream->writeback_info[i].wb_enabled = false; 1046 } 1047 1048 static void apply_ctx_interdependent_lock(struct dc *dc, 1049 struct dc_state *context, 1050 struct dc_stream_state *stream, 1051 bool lock) 1052 { 1053 int i; 1054 1055 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1056 if (dc->hwss.interdependent_update_lock) 1057 dc->hwss.interdependent_update_lock(dc, context, lock); 1058 else { 1059 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1060 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1061 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1062 1063 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1064 if (stream == pipe_ctx->stream) { 1065 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) && 1066 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1067 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1068 } 1069 } 1070 } 1071 } 1072 1073 static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 1074 { 1075 if (dc->ctx->dce_version >= DCN_VERSION_1_0) { 1076 memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color)); 1077 1078 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) 1079 get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1080 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) 1081 get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1082 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE) 1083 get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1084 else { 1085 if (dc->ctx->dce_version < DCN_VERSION_2_0) 1086 color_space_to_black_color( 1087 dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color)); 1088 } 1089 if (dc->ctx->dce_version >= DCN_VERSION_2_0) { 1090 if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) 1091 get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1092 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP) 1093 get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1094 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) 1095 get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1096 } 1097 } 1098 } 1099 1100 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1101 { 1102 int i, j; 1103 struct dc_state *dangling_context = dc_create_state(dc); 1104 struct dc_state *current_ctx; 1105 struct pipe_ctx *pipe; 1106 struct timing_generator *tg; 1107 1108 if (dangling_context == NULL) 1109 return; 1110 1111 dc_resource_state_copy_construct(dc->current_state, dangling_context); 1112 1113 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1114 struct dc_stream_state *old_stream = 1115 dc->current_state->res_ctx.pipe_ctx[i].stream; 1116 bool should_disable = true; 1117 bool pipe_split_change = false; 1118 1119 if ((context->res_ctx.pipe_ctx[i].top_pipe) && 1120 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe)) 1121 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx != 1122 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx; 1123 else 1124 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe != 1125 dc->current_state->res_ctx.pipe_ctx[i].top_pipe; 1126 1127 for (j = 0; j < context->stream_count; j++) { 1128 if (old_stream == context->streams[j]) { 1129 should_disable = false; 1130 break; 1131 } 1132 } 1133 if (!should_disable && pipe_split_change && 1134 dc->current_state->stream_count != context->stream_count) 1135 should_disable = true; 1136 1137 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe && 1138 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) { 1139 struct pipe_ctx *old_pipe, *new_pipe; 1140 1141 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1142 new_pipe = &context->res_ctx.pipe_ctx[i]; 1143 1144 if (old_pipe->plane_state && !new_pipe->plane_state) 1145 should_disable = true; 1146 } 1147 1148 if (should_disable && old_stream) { 1149 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1150 tg = pipe->stream_res.tg; 1151 /* When disabling plane for a phantom pipe, we must turn on the 1152 * phantom OTG so the disable programming gets the double buffer 1153 * update. Otherwise the pipe will be left in a partially disabled 1154 * state that can result in underflow or hang when enabling it 1155 * again for different use. 1156 */ 1157 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1158 if (tg->funcs->enable_crtc) { 1159 int main_pipe_width, main_pipe_height; 1160 1161 main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; 1162 main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; 1163 if (dc->hwss.blank_phantom) 1164 dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); 1165 tg->funcs->enable_crtc(tg); 1166 } 1167 } 1168 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1169 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1170 1171 if (pipe->stream && pipe->plane_state) 1172 dc_update_viusal_confirm_color(dc, context, pipe); 1173 1174 if (dc->hwss.apply_ctx_for_surface) { 1175 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 1176 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 1177 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 1178 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1179 } 1180 if (dc->hwss.program_front_end_for_ctx) { 1181 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 1182 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 1183 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 1184 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1185 } 1186 /* We need to put the phantom OTG back into it's default (disabled) state or we 1187 * can get corruption when transition from one SubVP config to a different one. 1188 * The OTG is set to disable on falling edge of VUPDATE so the plane disable 1189 * will still get it's double buffer update. 1190 */ 1191 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1192 if (tg->funcs->disable_phantom_crtc) 1193 tg->funcs->disable_phantom_crtc(tg); 1194 } 1195 } 1196 } 1197 1198 current_ctx = dc->current_state; 1199 dc->current_state = dangling_context; 1200 dc_release_state(current_ctx); 1201 } 1202 1203 static void disable_vbios_mode_if_required( 1204 struct dc *dc, 1205 struct dc_state *context) 1206 { 1207 unsigned int i, j; 1208 1209 /* check if timing_changed, disable stream*/ 1210 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1211 struct dc_stream_state *stream = NULL; 1212 struct dc_link *link = NULL; 1213 struct pipe_ctx *pipe = NULL; 1214 1215 pipe = &context->res_ctx.pipe_ctx[i]; 1216 stream = pipe->stream; 1217 if (stream == NULL) 1218 continue; 1219 1220 if (stream->apply_seamless_boot_optimization) 1221 continue; 1222 1223 // only looking for first odm pipe 1224 if (pipe->prev_odm_pipe) 1225 continue; 1226 1227 if (stream->link->local_sink && 1228 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1229 link = stream->link; 1230 } 1231 1232 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1233 unsigned int enc_inst, tg_inst = 0; 1234 unsigned int pix_clk_100hz; 1235 1236 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1237 if (enc_inst != ENGINE_ID_UNKNOWN) { 1238 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 1239 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 1240 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 1241 dc->res_pool->stream_enc[j]); 1242 break; 1243 } 1244 } 1245 1246 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1247 dc->res_pool->dp_clock_source, 1248 tg_inst, &pix_clk_100hz); 1249 1250 if (link->link_status.link_active) { 1251 uint32_t requested_pix_clk_100hz = 1252 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 1253 1254 if (pix_clk_100hz != requested_pix_clk_100hz) { 1255 dc->link_srv->set_dpms_off(pipe); 1256 pipe->stream->dpms_off = false; 1257 } 1258 } 1259 } 1260 } 1261 } 1262 } 1263 1264 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 1265 { 1266 int i; 1267 PERF_TRACE(); 1268 for (i = 0; i < MAX_PIPES; i++) { 1269 int count = 0; 1270 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1271 1272 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1273 continue; 1274 1275 /* Timeout 100 ms */ 1276 while (count < 100000) { 1277 /* Must set to false to start with, due to OR in update function */ 1278 pipe->plane_state->status.is_flip_pending = false; 1279 dc->hwss.update_pending_status(pipe); 1280 if (!pipe->plane_state->status.is_flip_pending) 1281 break; 1282 udelay(1); 1283 count++; 1284 } 1285 ASSERT(!pipe->plane_state->status.is_flip_pending); 1286 } 1287 PERF_TRACE(); 1288 } 1289 1290 /* Public functions */ 1291 1292 struct dc *dc_create(const struct dc_init_data *init_params) 1293 { 1294 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1295 unsigned int full_pipe_count; 1296 1297 if (!dc) 1298 return NULL; 1299 1300 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 1301 if (!dc_construct_ctx(dc, init_params)) 1302 goto destruct_dc; 1303 } else { 1304 if (!dc_construct(dc, init_params)) 1305 goto destruct_dc; 1306 1307 full_pipe_count = dc->res_pool->pipe_count; 1308 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 1309 full_pipe_count--; 1310 dc->caps.max_streams = min( 1311 full_pipe_count, 1312 dc->res_pool->stream_enc_count); 1313 1314 dc->caps.max_links = dc->link_count; 1315 dc->caps.max_audios = dc->res_pool->audio_count; 1316 dc->caps.linear_pitch_alignment = 64; 1317 1318 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 1319 1320 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; 1321 1322 if (dc->res_pool->dmcu != NULL) 1323 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 1324 } 1325 1326 dc->dcn_reg_offsets = init_params->dcn_reg_offsets; 1327 dc->nbio_reg_offsets = init_params->nbio_reg_offsets; 1328 1329 /* Populate versioning information */ 1330 dc->versions.dc_ver = DC_VER; 1331 1332 dc->build_id = DC_BUILD_ID; 1333 1334 DC_LOG_DC("Display Core initialized\n"); 1335 1336 1337 1338 return dc; 1339 1340 destruct_dc: 1341 dc_destruct(dc); 1342 kfree(dc); 1343 return NULL; 1344 } 1345 1346 static void detect_edp_presence(struct dc *dc) 1347 { 1348 struct dc_link *edp_links[MAX_NUM_EDP]; 1349 struct dc_link *edp_link = NULL; 1350 enum dc_connection_type type; 1351 int i; 1352 int edp_num; 1353 1354 dc_get_edp_links(dc, edp_links, &edp_num); 1355 if (!edp_num) 1356 return; 1357 1358 for (i = 0; i < edp_num; i++) { 1359 edp_link = edp_links[i]; 1360 if (dc->config.edp_not_connected) { 1361 edp_link->edp_sink_present = false; 1362 } else { 1363 dc_link_detect_connection_type(edp_link, &type); 1364 edp_link->edp_sink_present = (type != dc_connection_none); 1365 } 1366 } 1367 } 1368 1369 void dc_hardware_init(struct dc *dc) 1370 { 1371 1372 detect_edp_presence(dc); 1373 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1374 dc->hwss.init_hw(dc); 1375 } 1376 1377 void dc_init_callbacks(struct dc *dc, 1378 const struct dc_callback_init *init_params) 1379 { 1380 dc->ctx->cp_psp = init_params->cp_psp; 1381 } 1382 1383 void dc_deinit_callbacks(struct dc *dc) 1384 { 1385 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1386 } 1387 1388 void dc_destroy(struct dc **dc) 1389 { 1390 dc_destruct(*dc); 1391 kfree(*dc); 1392 *dc = NULL; 1393 } 1394 1395 static void enable_timing_multisync( 1396 struct dc *dc, 1397 struct dc_state *ctx) 1398 { 1399 int i, multisync_count = 0; 1400 int pipe_count = dc->res_pool->pipe_count; 1401 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1402 1403 for (i = 0; i < pipe_count; i++) { 1404 if (!ctx->res_ctx.pipe_ctx[i].stream || 1405 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1406 continue; 1407 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1408 continue; 1409 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1410 multisync_count++; 1411 } 1412 1413 if (multisync_count > 0) { 1414 dc->hwss.enable_per_frame_crtc_position_reset( 1415 dc, multisync_count, multisync_pipes); 1416 } 1417 } 1418 1419 static void program_timing_sync( 1420 struct dc *dc, 1421 struct dc_state *ctx) 1422 { 1423 int i, j, k; 1424 int group_index = 0; 1425 int num_group = 0; 1426 int pipe_count = dc->res_pool->pipe_count; 1427 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1428 1429 for (i = 0; i < pipe_count; i++) { 1430 if (!ctx->res_ctx.pipe_ctx[i].stream 1431 || ctx->res_ctx.pipe_ctx[i].top_pipe 1432 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe) 1433 continue; 1434 1435 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1436 } 1437 1438 for (i = 0; i < pipe_count; i++) { 1439 int group_size = 1; 1440 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; 1441 struct pipe_ctx *pipe_set[MAX_PIPES]; 1442 1443 if (!unsynced_pipes[i]) 1444 continue; 1445 1446 pipe_set[0] = unsynced_pipes[i]; 1447 unsynced_pipes[i] = NULL; 1448 1449 /* Add tg to the set, search rest of the tg's for ones with 1450 * same timing, add all tgs with same timing to the group 1451 */ 1452 for (j = i + 1; j < pipe_count; j++) { 1453 if (!unsynced_pipes[j]) 1454 continue; 1455 if (sync_type != TIMING_SYNCHRONIZABLE && 1456 dc->hwss.enable_vblanks_synchronization && 1457 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && 1458 resource_are_vblanks_synchronizable( 1459 unsynced_pipes[j]->stream, 1460 pipe_set[0]->stream)) { 1461 sync_type = VBLANK_SYNCHRONIZABLE; 1462 pipe_set[group_size] = unsynced_pipes[j]; 1463 unsynced_pipes[j] = NULL; 1464 group_size++; 1465 } else 1466 if (sync_type != VBLANK_SYNCHRONIZABLE && 1467 resource_are_streams_timing_synchronizable( 1468 unsynced_pipes[j]->stream, 1469 pipe_set[0]->stream)) { 1470 sync_type = TIMING_SYNCHRONIZABLE; 1471 pipe_set[group_size] = unsynced_pipes[j]; 1472 unsynced_pipes[j] = NULL; 1473 group_size++; 1474 } 1475 } 1476 1477 /* set first unblanked pipe as master */ 1478 for (j = 0; j < group_size; j++) { 1479 bool is_blanked; 1480 1481 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1482 is_blanked = 1483 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1484 else 1485 is_blanked = 1486 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1487 if (!is_blanked) { 1488 if (j == 0) 1489 break; 1490 1491 swap(pipe_set[0], pipe_set[j]); 1492 break; 1493 } 1494 } 1495 1496 for (k = 0; k < group_size; k++) { 1497 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1498 1499 status->timing_sync_info.group_id = num_group; 1500 status->timing_sync_info.group_size = group_size; 1501 if (k == 0) 1502 status->timing_sync_info.master = true; 1503 else 1504 status->timing_sync_info.master = false; 1505 1506 } 1507 1508 /* remove any other pipes that are already been synced */ 1509 if (dc->config.use_pipe_ctx_sync_logic) { 1510 /* check pipe's syncd to decide which pipe to be removed */ 1511 for (j = 1; j < group_size; j++) { 1512 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1513 group_size--; 1514 pipe_set[j] = pipe_set[group_size]; 1515 j--; 1516 } else 1517 /* link slave pipe's syncd with master pipe */ 1518 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1519 } 1520 } else { 1521 for (j = j + 1; j < group_size; j++) { 1522 bool is_blanked; 1523 1524 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1525 is_blanked = 1526 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1527 else 1528 is_blanked = 1529 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1530 if (!is_blanked) { 1531 group_size--; 1532 pipe_set[j] = pipe_set[group_size]; 1533 j--; 1534 } 1535 } 1536 } 1537 1538 if (group_size > 1) { 1539 if (sync_type == TIMING_SYNCHRONIZABLE) { 1540 dc->hwss.enable_timing_synchronization( 1541 dc, group_index, group_size, pipe_set); 1542 } else 1543 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1544 dc->hwss.enable_vblanks_synchronization( 1545 dc, group_index, group_size, pipe_set); 1546 } 1547 group_index++; 1548 } 1549 num_group++; 1550 } 1551 } 1552 1553 static bool streams_changed(struct dc *dc, 1554 struct dc_stream_state *streams[], 1555 uint8_t stream_count) 1556 { 1557 uint8_t i; 1558 1559 if (stream_count != dc->current_state->stream_count) 1560 return true; 1561 1562 for (i = 0; i < dc->current_state->stream_count; i++) { 1563 if (dc->current_state->streams[i] != streams[i]) 1564 return true; 1565 if (!streams[i]->link->link_state_valid) 1566 return true; 1567 } 1568 1569 return false; 1570 } 1571 1572 bool dc_validate_boot_timing(const struct dc *dc, 1573 const struct dc_sink *sink, 1574 struct dc_crtc_timing *crtc_timing) 1575 { 1576 struct timing_generator *tg; 1577 struct stream_encoder *se = NULL; 1578 1579 struct dc_crtc_timing hw_crtc_timing = {0}; 1580 1581 struct dc_link *link = sink->link; 1582 unsigned int i, enc_inst, tg_inst = 0; 1583 1584 /* Support seamless boot on EDP displays only */ 1585 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1586 return false; 1587 } 1588 1589 if (dc->debug.force_odm_combine) 1590 return false; 1591 1592 /* Check for enabled DIG to identify enabled display */ 1593 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) 1594 return false; 1595 1596 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1597 1598 if (enc_inst == ENGINE_ID_UNKNOWN) 1599 return false; 1600 1601 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1602 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1603 1604 se = dc->res_pool->stream_enc[i]; 1605 1606 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1607 dc->res_pool->stream_enc[i]); 1608 break; 1609 } 1610 } 1611 1612 // tg_inst not found 1613 if (i == dc->res_pool->stream_enc_count) 1614 return false; 1615 1616 if (tg_inst >= dc->res_pool->timing_generator_count) 1617 return false; 1618 1619 if (tg_inst != link->link_enc->preferred_engine) 1620 return false; 1621 1622 tg = dc->res_pool->timing_generators[tg_inst]; 1623 1624 if (!tg->funcs->get_hw_timing) 1625 return false; 1626 1627 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) 1628 return false; 1629 1630 if (crtc_timing->h_total != hw_crtc_timing.h_total) 1631 return false; 1632 1633 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) 1634 return false; 1635 1636 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) 1637 return false; 1638 1639 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) 1640 return false; 1641 1642 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) 1643 return false; 1644 1645 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) 1646 return false; 1647 1648 if (crtc_timing->v_total != hw_crtc_timing.v_total) 1649 return false; 1650 1651 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) 1652 return false; 1653 1654 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) 1655 return false; 1656 1657 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) 1658 return false; 1659 1660 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) 1661 return false; 1662 1663 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1664 return false; 1665 1666 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1667 if (crtc_timing->flags.DSC) 1668 return false; 1669 1670 if (dc_is_dp_signal(link->connector_signal)) { 1671 unsigned int pix_clk_100hz; 1672 uint32_t numOdmPipes = 1; 1673 uint32_t id_src[4] = {0}; 1674 1675 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1676 dc->res_pool->dp_clock_source, 1677 tg_inst, &pix_clk_100hz); 1678 1679 if (tg->funcs->get_optc_source) 1680 tg->funcs->get_optc_source(tg, 1681 &numOdmPipes, &id_src[0], &id_src[1]); 1682 1683 if (numOdmPipes == 2) 1684 pix_clk_100hz *= 2; 1685 if (numOdmPipes == 4) 1686 pix_clk_100hz *= 4; 1687 1688 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1689 // slightly due to rounding issues in 10 kHz units. 1690 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) 1691 return false; 1692 1693 if (!se->funcs->dp_get_pixel_format) 1694 return false; 1695 1696 if (!se->funcs->dp_get_pixel_format( 1697 se, 1698 &hw_crtc_timing.pixel_encoding, 1699 &hw_crtc_timing.display_color_depth)) 1700 return false; 1701 1702 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) 1703 return false; 1704 1705 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) 1706 return false; 1707 } 1708 1709 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1710 return false; 1711 } 1712 1713 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) { 1714 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1715 return false; 1716 } 1717 1718 return true; 1719 } 1720 1721 static inline bool should_update_pipe_for_stream( 1722 struct dc_state *context, 1723 struct pipe_ctx *pipe_ctx, 1724 struct dc_stream_state *stream) 1725 { 1726 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1727 } 1728 1729 static inline bool should_update_pipe_for_plane( 1730 struct dc_state *context, 1731 struct pipe_ctx *pipe_ctx, 1732 struct dc_plane_state *plane_state) 1733 { 1734 return (pipe_ctx->plane_state == plane_state); 1735 } 1736 1737 void dc_enable_stereo( 1738 struct dc *dc, 1739 struct dc_state *context, 1740 struct dc_stream_state *streams[], 1741 uint8_t stream_count) 1742 { 1743 int i, j; 1744 struct pipe_ctx *pipe; 1745 1746 for (i = 0; i < MAX_PIPES; i++) { 1747 if (context != NULL) { 1748 pipe = &context->res_ctx.pipe_ctx[i]; 1749 } else { 1750 context = dc->current_state; 1751 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1752 } 1753 1754 for (j = 0; pipe && j < stream_count; j++) { 1755 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1756 dc->hwss.setup_stereo) 1757 dc->hwss.setup_stereo(pipe, dc); 1758 } 1759 } 1760 } 1761 1762 void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1763 { 1764 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1765 enable_timing_multisync(dc, context); 1766 program_timing_sync(dc, context); 1767 } 1768 } 1769 1770 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1771 { 1772 int i; 1773 unsigned int stream_mask = 0; 1774 1775 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1776 if (context->res_ctx.pipe_ctx[i].stream) 1777 stream_mask |= 1 << i; 1778 } 1779 1780 return stream_mask; 1781 } 1782 1783 void dc_z10_restore(const struct dc *dc) 1784 { 1785 if (dc->hwss.z10_restore) 1786 dc->hwss.z10_restore(dc); 1787 } 1788 1789 void dc_z10_save_init(struct dc *dc) 1790 { 1791 if (dc->hwss.z10_save_init) 1792 dc->hwss.z10_save_init(dc); 1793 } 1794 1795 /** 1796 * dc_commit_state_no_check - Apply context to the hardware 1797 * 1798 * @dc: DC object with the current status to be updated 1799 * @context: New state that will become the current status at the end of this function 1800 * 1801 * Applies given context to the hardware and copy it into current context. 1802 * It's up to the user to release the src context afterwards. 1803 * 1804 * Return: an enum dc_status result code for the operation 1805 */ 1806 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1807 { 1808 struct dc_bios *dcb = dc->ctx->dc_bios; 1809 enum dc_status result = DC_ERROR_UNEXPECTED; 1810 struct pipe_ctx *pipe; 1811 int i, k, l; 1812 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1813 struct dc_state *old_state; 1814 bool subvp_prev_use = false; 1815 1816 dc_z10_restore(dc); 1817 dc_allow_idle_optimizations(dc, false); 1818 1819 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1820 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1821 1822 /* Check old context for SubVP */ 1823 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 1824 if (subvp_prev_use) 1825 break; 1826 } 1827 1828 for (i = 0; i < context->stream_count; i++) 1829 dc_streams[i] = context->streams[i]; 1830 1831 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1832 disable_vbios_mode_if_required(dc, context); 1833 dc->hwss.enable_accelerated_mode(dc, context); 1834 } 1835 1836 if (context->stream_count > get_seamless_boot_stream_count(context) || 1837 context->stream_count == 0) 1838 dc->hwss.prepare_bandwidth(dc, context); 1839 1840 /* When SubVP is active, all HW programming must be done while 1841 * SubVP lock is acquired 1842 */ 1843 if (dc->hwss.subvp_pipe_control_lock) 1844 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1845 1846 if (dc->hwss.update_dsc_pg) 1847 dc->hwss.update_dsc_pg(dc, context, false); 1848 1849 disable_dangling_plane(dc, context); 1850 /* re-program planes for existing stream, in case we need to 1851 * free up plane resource for later use 1852 */ 1853 if (dc->hwss.apply_ctx_for_surface) { 1854 for (i = 0; i < context->stream_count; i++) { 1855 if (context->streams[i]->mode_changed) 1856 continue; 1857 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1858 dc->hwss.apply_ctx_for_surface( 1859 dc, context->streams[i], 1860 context->stream_status[i].plane_count, 1861 context); /* use new pipe config in new context */ 1862 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1863 dc->hwss.post_unlock_program_front_end(dc, context); 1864 } 1865 } 1866 1867 /* Program hardware */ 1868 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1869 pipe = &context->res_ctx.pipe_ctx[i]; 1870 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1871 } 1872 1873 result = dc->hwss.apply_ctx_to_hw(dc, context); 1874 1875 if (result != DC_OK) { 1876 /* Application of dc_state to hardware stopped. */ 1877 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; 1878 return result; 1879 } 1880 1881 dc_trigger_sync(dc, context); 1882 1883 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */ 1884 for (i = 0; i < context->stream_count; i++) { 1885 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed; 1886 1887 context->streams[i]->update_flags.raw = 0xFFFFFFFF; 1888 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed; 1889 } 1890 1891 /* Program all planes within new context*/ 1892 if (dc->hwss.program_front_end_for_ctx) { 1893 dc->hwss.interdependent_update_lock(dc, context, true); 1894 dc->hwss.program_front_end_for_ctx(dc, context); 1895 dc->hwss.interdependent_update_lock(dc, context, false); 1896 dc->hwss.post_unlock_program_front_end(dc, context); 1897 } 1898 1899 if (dc->hwss.commit_subvp_config) 1900 dc->hwss.commit_subvp_config(dc, context); 1901 if (dc->hwss.subvp_pipe_control_lock) 1902 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 1903 1904 for (i = 0; i < context->stream_count; i++) { 1905 const struct dc_link *link = context->streams[i]->link; 1906 1907 if (!context->streams[i]->mode_changed) 1908 continue; 1909 1910 if (dc->hwss.apply_ctx_for_surface) { 1911 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1912 dc->hwss.apply_ctx_for_surface( 1913 dc, context->streams[i], 1914 context->stream_status[i].plane_count, 1915 context); 1916 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1917 dc->hwss.post_unlock_program_front_end(dc, context); 1918 } 1919 1920 /* 1921 * enable stereo 1922 * TODO rework dc_enable_stereo call to work with validation sets? 1923 */ 1924 for (k = 0; k < MAX_PIPES; k++) { 1925 pipe = &context->res_ctx.pipe_ctx[k]; 1926 1927 for (l = 0 ; pipe && l < context->stream_count; l++) { 1928 if (context->streams[l] && 1929 context->streams[l] == pipe->stream && 1930 dc->hwss.setup_stereo) 1931 dc->hwss.setup_stereo(pipe, dc); 1932 } 1933 } 1934 1935 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 1936 context->streams[i]->timing.h_addressable, 1937 context->streams[i]->timing.v_addressable, 1938 context->streams[i]->timing.h_total, 1939 context->streams[i]->timing.v_total, 1940 context->streams[i]->timing.pix_clk_100hz / 10); 1941 } 1942 1943 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1944 1945 if (context->stream_count > get_seamless_boot_stream_count(context) || 1946 context->stream_count == 0) { 1947 /* Must wait for no flips to be pending before doing optimize bw */ 1948 wait_for_no_pipes_pending(dc, context); 1949 /* pplib is notified if disp_num changed */ 1950 dc->hwss.optimize_bandwidth(dc, context); 1951 } 1952 1953 if (dc->hwss.update_dsc_pg) 1954 dc->hwss.update_dsc_pg(dc, context, true); 1955 1956 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1957 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1958 else 1959 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1960 1961 context->stream_mask = get_stream_mask(dc, context); 1962 1963 if (context->stream_mask != dc->current_state->stream_mask) 1964 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 1965 1966 for (i = 0; i < context->stream_count; i++) 1967 context->streams[i]->mode_changed = false; 1968 1969 /* Clear update flags that were set earlier to avoid redundant programming */ 1970 for (i = 0; i < context->stream_count; i++) { 1971 context->streams[i]->update_flags.raw = 0x0; 1972 } 1973 1974 old_state = dc->current_state; 1975 dc->current_state = context; 1976 1977 dc_release_state(old_state); 1978 1979 dc_retain_state(dc->current_state); 1980 1981 return result; 1982 } 1983 1984 static bool commit_minimal_transition_state(struct dc *dc, 1985 struct dc_state *transition_base_context); 1986 1987 /** 1988 * dc_commit_streams - Commit current stream state 1989 * 1990 * @dc: DC object with the commit state to be configured in the hardware 1991 * @streams: Array with a list of stream state 1992 * @stream_count: Total of streams 1993 * 1994 * Function responsible for commit streams change to the hardware. 1995 * 1996 * Return: 1997 * Return DC_OK if everything work as expected, otherwise, return a dc_status 1998 * code. 1999 */ 2000 enum dc_status dc_commit_streams(struct dc *dc, 2001 struct dc_stream_state *streams[], 2002 uint8_t stream_count) 2003 { 2004 int i, j; 2005 struct dc_state *context; 2006 enum dc_status res = DC_OK; 2007 struct dc_validation_set set[MAX_STREAMS] = {0}; 2008 struct pipe_ctx *pipe; 2009 bool handle_exit_odm2to1 = false; 2010 2011 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2012 return res; 2013 2014 if (!streams_changed(dc, streams, stream_count)) 2015 return res; 2016 2017 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 2018 2019 for (i = 0; i < stream_count; i++) { 2020 struct dc_stream_state *stream = streams[i]; 2021 struct dc_stream_status *status = dc_stream_get_status(stream); 2022 2023 dc_stream_log(dc, stream); 2024 2025 set[i].stream = stream; 2026 2027 if (status) { 2028 set[i].plane_count = status->plane_count; 2029 for (j = 0; j < status->plane_count; j++) 2030 set[i].plane_states[j] = status->plane_states[j]; 2031 } 2032 } 2033 2034 /* ODM Combine 2:1 power optimization is only applied for single stream 2035 * scenario, it uses extra pipes than needed to reduce power consumption 2036 * We need to switch off this feature to make room for new streams. 2037 */ 2038 if (stream_count > dc->current_state->stream_count && 2039 dc->current_state->stream_count == 1) { 2040 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2041 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 2042 if (pipe->next_odm_pipe) 2043 handle_exit_odm2to1 = true; 2044 } 2045 } 2046 2047 if (handle_exit_odm2to1) 2048 res = commit_minimal_transition_state(dc, dc->current_state); 2049 2050 context = dc_create_state(dc); 2051 if (!context) 2052 goto context_alloc_fail; 2053 2054 dc_resource_state_copy_construct_current(dc, context); 2055 2056 res = dc_validate_with_context(dc, set, stream_count, context, false); 2057 if (res != DC_OK) { 2058 BREAK_TO_DEBUGGER(); 2059 goto fail; 2060 } 2061 2062 res = dc_commit_state_no_check(dc, context); 2063 2064 for (i = 0; i < stream_count; i++) { 2065 for (j = 0; j < context->stream_count; j++) { 2066 if (streams[i]->stream_id == context->streams[j]->stream_id) 2067 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2068 2069 if (dc_is_embedded_signal(streams[i]->signal)) { 2070 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 2071 2072 if (dc->hwss.is_abm_supported) 2073 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 2074 else 2075 status->is_abm_supported = true; 2076 } 2077 } 2078 } 2079 2080 fail: 2081 dc_release_state(context); 2082 2083 context_alloc_fail: 2084 2085 DC_LOG_DC("%s Finished.\n", __func__); 2086 2087 return res; 2088 } 2089 2090 bool dc_acquire_release_mpc_3dlut( 2091 struct dc *dc, bool acquire, 2092 struct dc_stream_state *stream, 2093 struct dc_3dlut **lut, 2094 struct dc_transfer_func **shaper) 2095 { 2096 int pipe_idx; 2097 bool ret = false; 2098 bool found_pipe_idx = false; 2099 const struct resource_pool *pool = dc->res_pool; 2100 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2101 int mpcc_id = 0; 2102 2103 if (pool && res_ctx) { 2104 if (acquire) { 2105 /*find pipe idx for the given stream*/ 2106 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2107 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2108 found_pipe_idx = true; 2109 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2110 break; 2111 } 2112 } 2113 } else 2114 found_pipe_idx = true;/*for release pipe_idx is not required*/ 2115 2116 if (found_pipe_idx) { 2117 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2118 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2119 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2120 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2121 } 2122 } 2123 return ret; 2124 } 2125 2126 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2127 { 2128 int i; 2129 struct pipe_ctx *pipe; 2130 2131 for (i = 0; i < MAX_PIPES; i++) { 2132 pipe = &context->res_ctx.pipe_ctx[i]; 2133 2134 // Don't check flip pending on phantom pipes 2135 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2136 continue; 2137 2138 /* Must set to false to start with, due to OR in update function */ 2139 pipe->plane_state->status.is_flip_pending = false; 2140 dc->hwss.update_pending_status(pipe); 2141 if (pipe->plane_state->status.is_flip_pending) 2142 return true; 2143 } 2144 return false; 2145 } 2146 2147 /* Perform updates here which need to be deferred until next vupdate 2148 * 2149 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2150 * but forcing lut memory to shutdown state is immediate. This causes 2151 * single frame corruption as lut gets disabled mid-frame unless shutdown 2152 * is deferred until after entering bypass. 2153 */ 2154 static void process_deferred_updates(struct dc *dc) 2155 { 2156 int i = 0; 2157 2158 if (dc->debug.enable_mem_low_power.bits.cm) { 2159 ASSERT(dc->dcn_ip->max_num_dpp); 2160 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2161 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2162 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2163 } 2164 } 2165 2166 void dc_post_update_surfaces_to_stream(struct dc *dc) 2167 { 2168 int i; 2169 struct dc_state *context = dc->current_state; 2170 2171 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2172 return; 2173 2174 post_surface_trace(dc); 2175 2176 /* 2177 * Only relevant for DCN behavior where we can guarantee the optimization 2178 * is safe to apply - retain the legacy behavior for DCE. 2179 */ 2180 2181 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2182 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2183 else { 2184 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2185 2186 if (is_flip_pending_in_pipes(dc, context)) 2187 return; 2188 2189 for (i = 0; i < dc->res_pool->pipe_count; i++) 2190 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2191 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2192 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2193 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2194 } 2195 2196 process_deferred_updates(dc); 2197 2198 dc->hwss.optimize_bandwidth(dc, context); 2199 2200 if (dc->hwss.update_dsc_pg) 2201 dc->hwss.update_dsc_pg(dc, context, true); 2202 } 2203 2204 dc->optimized_required = false; 2205 dc->wm_optimized_required = false; 2206 } 2207 2208 static void init_state(struct dc *dc, struct dc_state *context) 2209 { 2210 /* Each context must have their own instance of VBA and in order to 2211 * initialize and obtain IP and SOC the base DML instance from DC is 2212 * initially copied into every context 2213 */ 2214 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2215 } 2216 2217 struct dc_state *dc_create_state(struct dc *dc) 2218 { 2219 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2220 GFP_KERNEL); 2221 2222 if (!context) 2223 return NULL; 2224 2225 init_state(dc, context); 2226 2227 kref_init(&context->refcount); 2228 2229 return context; 2230 } 2231 2232 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2233 { 2234 int i, j; 2235 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2236 2237 if (!new_ctx) 2238 return NULL; 2239 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2240 2241 for (i = 0; i < MAX_PIPES; i++) { 2242 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2243 2244 if (cur_pipe->top_pipe) 2245 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2246 2247 if (cur_pipe->bottom_pipe) 2248 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2249 2250 if (cur_pipe->prev_odm_pipe) 2251 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2252 2253 if (cur_pipe->next_odm_pipe) 2254 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2255 2256 } 2257 2258 for (i = 0; i < new_ctx->stream_count; i++) { 2259 dc_stream_retain(new_ctx->streams[i]); 2260 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2261 dc_plane_state_retain( 2262 new_ctx->stream_status[i].plane_states[j]); 2263 } 2264 2265 kref_init(&new_ctx->refcount); 2266 2267 return new_ctx; 2268 } 2269 2270 void dc_retain_state(struct dc_state *context) 2271 { 2272 kref_get(&context->refcount); 2273 } 2274 2275 static void dc_state_free(struct kref *kref) 2276 { 2277 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2278 dc_resource_state_destruct(context); 2279 kvfree(context); 2280 } 2281 2282 void dc_release_state(struct dc_state *context) 2283 { 2284 kref_put(&context->refcount, dc_state_free); 2285 } 2286 2287 bool dc_set_generic_gpio_for_stereo(bool enable, 2288 struct gpio_service *gpio_service) 2289 { 2290 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2291 struct gpio_pin_info pin_info; 2292 struct gpio *generic; 2293 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2294 GFP_KERNEL); 2295 2296 if (!config) 2297 return false; 2298 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2299 2300 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2301 kfree(config); 2302 return false; 2303 } else { 2304 generic = dal_gpio_service_create_generic_mux( 2305 gpio_service, 2306 pin_info.offset, 2307 pin_info.mask); 2308 } 2309 2310 if (!generic) { 2311 kfree(config); 2312 return false; 2313 } 2314 2315 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2316 2317 config->enable_output_from_mux = enable; 2318 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2319 2320 if (gpio_result == GPIO_RESULT_OK) 2321 gpio_result = dal_mux_setup_config(generic, config); 2322 2323 if (gpio_result == GPIO_RESULT_OK) { 2324 dal_gpio_close(generic); 2325 dal_gpio_destroy_generic_mux(&generic); 2326 kfree(config); 2327 return true; 2328 } else { 2329 dal_gpio_close(generic); 2330 dal_gpio_destroy_generic_mux(&generic); 2331 kfree(config); 2332 return false; 2333 } 2334 } 2335 2336 static bool is_surface_in_context( 2337 const struct dc_state *context, 2338 const struct dc_plane_state *plane_state) 2339 { 2340 int j; 2341 2342 for (j = 0; j < MAX_PIPES; j++) { 2343 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2344 2345 if (plane_state == pipe_ctx->plane_state) { 2346 return true; 2347 } 2348 } 2349 2350 return false; 2351 } 2352 2353 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2354 { 2355 union surface_update_flags *update_flags = &u->surface->update_flags; 2356 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2357 2358 if (!u->plane_info) 2359 return UPDATE_TYPE_FAST; 2360 2361 if (u->plane_info->color_space != u->surface->color_space) { 2362 update_flags->bits.color_space_change = 1; 2363 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2364 } 2365 2366 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2367 update_flags->bits.horizontal_mirror_change = 1; 2368 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2369 } 2370 2371 if (u->plane_info->rotation != u->surface->rotation) { 2372 update_flags->bits.rotation_change = 1; 2373 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2374 } 2375 2376 if (u->plane_info->format != u->surface->format) { 2377 update_flags->bits.pixel_format_change = 1; 2378 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2379 } 2380 2381 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2382 update_flags->bits.stereo_format_change = 1; 2383 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2384 } 2385 2386 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2387 update_flags->bits.per_pixel_alpha_change = 1; 2388 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2389 } 2390 2391 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2392 update_flags->bits.global_alpha_change = 1; 2393 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2394 } 2395 2396 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2397 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2398 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2399 /* During DCC on/off, stutter period is calculated before 2400 * DCC has fully transitioned. This results in incorrect 2401 * stutter period calculation. Triggering a full update will 2402 * recalculate stutter period. 2403 */ 2404 update_flags->bits.dcc_change = 1; 2405 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2406 } 2407 2408 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2409 resource_pixel_format_to_bpp(u->surface->format)) { 2410 /* different bytes per element will require full bandwidth 2411 * and DML calculation 2412 */ 2413 update_flags->bits.bpp_change = 1; 2414 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2415 } 2416 2417 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2418 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2419 update_flags->bits.plane_size_change = 1; 2420 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2421 } 2422 2423 2424 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2425 sizeof(union dc_tiling_info)) != 0) { 2426 update_flags->bits.swizzle_change = 1; 2427 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2428 2429 /* todo: below are HW dependent, we should add a hook to 2430 * DCE/N resource and validated there. 2431 */ 2432 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2433 /* swizzled mode requires RQ to be setup properly, 2434 * thus need to run DML to calculate RQ settings 2435 */ 2436 update_flags->bits.bandwidth_change = 1; 2437 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2438 } 2439 } 2440 2441 /* This should be UPDATE_TYPE_FAST if nothing has changed. */ 2442 return update_type; 2443 } 2444 2445 static enum surface_update_type get_scaling_info_update_type( 2446 const struct dc *dc, 2447 const struct dc_surface_update *u) 2448 { 2449 union surface_update_flags *update_flags = &u->surface->update_flags; 2450 2451 if (!u->scaling_info) 2452 return UPDATE_TYPE_FAST; 2453 2454 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2455 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2456 || u->scaling_info->scaling_quality.integer_scaling != 2457 u->surface->scaling_quality.integer_scaling 2458 ) { 2459 update_flags->bits.scaling_change = 1; 2460 2461 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2462 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2463 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2464 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2465 /* Making dst rect smaller requires a bandwidth change */ 2466 update_flags->bits.bandwidth_change = 1; 2467 } 2468 2469 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2470 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2471 2472 update_flags->bits.scaling_change = 1; 2473 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2474 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2475 /* Making src rect bigger requires a bandwidth change */ 2476 update_flags->bits.clock_change = 1; 2477 } 2478 2479 if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && 2480 (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || 2481 u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) 2482 /* Changing clip size of a large surface may result in MPC slice count change */ 2483 update_flags->bits.bandwidth_change = 1; 2484 2485 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2486 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2487 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2488 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2489 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2490 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2491 update_flags->bits.position_change = 1; 2492 2493 if (update_flags->bits.clock_change 2494 || update_flags->bits.bandwidth_change 2495 || update_flags->bits.scaling_change) 2496 return UPDATE_TYPE_FULL; 2497 2498 if (update_flags->bits.position_change) 2499 return UPDATE_TYPE_MED; 2500 2501 return UPDATE_TYPE_FAST; 2502 } 2503 2504 static enum surface_update_type det_surface_update(const struct dc *dc, 2505 const struct dc_surface_update *u) 2506 { 2507 const struct dc_state *context = dc->current_state; 2508 enum surface_update_type type; 2509 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2510 union surface_update_flags *update_flags = &u->surface->update_flags; 2511 2512 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2513 update_flags->raw = 0xFFFFFFFF; 2514 return UPDATE_TYPE_FULL; 2515 } 2516 2517 update_flags->raw = 0; // Reset all flags 2518 2519 type = get_plane_info_update_type(u); 2520 elevate_update_type(&overall_type, type); 2521 2522 type = get_scaling_info_update_type(dc, u); 2523 elevate_update_type(&overall_type, type); 2524 2525 if (u->flip_addr) { 2526 update_flags->bits.addr_update = 1; 2527 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2528 update_flags->bits.tmz_changed = 1; 2529 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2530 } 2531 } 2532 if (u->in_transfer_func) 2533 update_flags->bits.in_transfer_func_change = 1; 2534 2535 if (u->input_csc_color_matrix) 2536 update_flags->bits.input_csc_change = 1; 2537 2538 if (u->coeff_reduction_factor) 2539 update_flags->bits.coeff_reduction_change = 1; 2540 2541 if (u->gamut_remap_matrix) 2542 update_flags->bits.gamut_remap_change = 1; 2543 2544 if (u->gamma) { 2545 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2546 2547 if (u->plane_info) 2548 format = u->plane_info->format; 2549 else if (u->surface) 2550 format = u->surface->format; 2551 2552 if (dce_use_lut(format)) 2553 update_flags->bits.gamma_change = 1; 2554 } 2555 2556 if (u->lut3d_func || u->func_shaper) 2557 update_flags->bits.lut_3d = 1; 2558 2559 if (u->hdr_mult.value) 2560 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2561 update_flags->bits.hdr_mult = 1; 2562 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2563 } 2564 2565 if (update_flags->bits.in_transfer_func_change) { 2566 type = UPDATE_TYPE_MED; 2567 elevate_update_type(&overall_type, type); 2568 } 2569 2570 if (update_flags->bits.lut_3d) { 2571 type = UPDATE_TYPE_FULL; 2572 elevate_update_type(&overall_type, type); 2573 } 2574 2575 if (dc->debug.enable_legacy_fast_update && 2576 (update_flags->bits.gamma_change || 2577 update_flags->bits.gamut_remap_change || 2578 update_flags->bits.input_csc_change || 2579 update_flags->bits.coeff_reduction_change)) { 2580 type = UPDATE_TYPE_FULL; 2581 elevate_update_type(&overall_type, type); 2582 } 2583 return overall_type; 2584 } 2585 2586 static enum surface_update_type check_update_surfaces_for_stream( 2587 struct dc *dc, 2588 struct dc_surface_update *updates, 2589 int surface_count, 2590 struct dc_stream_update *stream_update, 2591 const struct dc_stream_status *stream_status) 2592 { 2593 int i; 2594 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2595 2596 if (dc->idle_optimizations_allowed) 2597 overall_type = UPDATE_TYPE_FULL; 2598 2599 if (stream_status == NULL || stream_status->plane_count != surface_count) 2600 overall_type = UPDATE_TYPE_FULL; 2601 2602 if (stream_update && stream_update->pending_test_pattern) { 2603 overall_type = UPDATE_TYPE_FULL; 2604 } 2605 2606 /* some stream updates require passive update */ 2607 if (stream_update) { 2608 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2609 2610 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2611 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2612 stream_update->integer_scaling_update) 2613 su_flags->bits.scaling = 1; 2614 2615 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2616 su_flags->bits.out_tf = 1; 2617 2618 if (stream_update->abm_level) 2619 su_flags->bits.abm_level = 1; 2620 2621 if (stream_update->dpms_off) 2622 su_flags->bits.dpms_off = 1; 2623 2624 if (stream_update->gamut_remap) 2625 su_flags->bits.gamut_remap = 1; 2626 2627 if (stream_update->wb_update) 2628 su_flags->bits.wb_update = 1; 2629 2630 if (stream_update->dsc_config) 2631 su_flags->bits.dsc_changed = 1; 2632 2633 if (stream_update->mst_bw_update) 2634 su_flags->bits.mst_bw = 1; 2635 2636 if (stream_update->stream && stream_update->stream->freesync_on_desktop && 2637 (stream_update->vrr_infopacket || stream_update->allow_freesync || 2638 stream_update->vrr_active_variable || stream_update->vrr_active_fixed)) 2639 su_flags->bits.fams_changed = 1; 2640 2641 if (su_flags->raw != 0) 2642 overall_type = UPDATE_TYPE_FULL; 2643 2644 if (stream_update->output_csc_transform || stream_update->output_color_space) 2645 su_flags->bits.out_csc = 1; 2646 2647 /* Output transfer function changes do not require bandwidth recalculation, 2648 * so don't trigger a full update 2649 */ 2650 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) 2651 su_flags->bits.out_tf = 1; 2652 } 2653 2654 for (i = 0 ; i < surface_count; i++) { 2655 enum surface_update_type type = 2656 det_surface_update(dc, &updates[i]); 2657 2658 elevate_update_type(&overall_type, type); 2659 } 2660 2661 return overall_type; 2662 } 2663 2664 /* 2665 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2666 * 2667 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2668 */ 2669 enum surface_update_type dc_check_update_surfaces_for_stream( 2670 struct dc *dc, 2671 struct dc_surface_update *updates, 2672 int surface_count, 2673 struct dc_stream_update *stream_update, 2674 const struct dc_stream_status *stream_status) 2675 { 2676 int i; 2677 enum surface_update_type type; 2678 2679 if (stream_update) 2680 stream_update->stream->update_flags.raw = 0; 2681 for (i = 0; i < surface_count; i++) 2682 updates[i].surface->update_flags.raw = 0; 2683 2684 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2685 if (type == UPDATE_TYPE_FULL) { 2686 if (stream_update) { 2687 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2688 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2689 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2690 } 2691 for (i = 0; i < surface_count; i++) 2692 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2693 } 2694 2695 if (type == UPDATE_TYPE_FAST) { 2696 // If there's an available clock comparator, we use that. 2697 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2698 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2699 dc->optimized_required = true; 2700 // Else we fallback to mem compare. 2701 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2702 dc->optimized_required = true; 2703 } 2704 2705 dc->optimized_required |= dc->wm_optimized_required; 2706 } 2707 2708 return type; 2709 } 2710 2711 static struct dc_stream_status *stream_get_status( 2712 struct dc_state *ctx, 2713 struct dc_stream_state *stream) 2714 { 2715 uint8_t i; 2716 2717 for (i = 0; i < ctx->stream_count; i++) { 2718 if (stream == ctx->streams[i]) { 2719 return &ctx->stream_status[i]; 2720 } 2721 } 2722 2723 return NULL; 2724 } 2725 2726 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2727 2728 static void copy_surface_update_to_plane( 2729 struct dc_plane_state *surface, 2730 struct dc_surface_update *srf_update) 2731 { 2732 if (srf_update->flip_addr) { 2733 surface->address = srf_update->flip_addr->address; 2734 surface->flip_immediate = 2735 srf_update->flip_addr->flip_immediate; 2736 surface->time.time_elapsed_in_us[surface->time.index] = 2737 srf_update->flip_addr->flip_timestamp_in_us - 2738 surface->time.prev_update_time_in_us; 2739 surface->time.prev_update_time_in_us = 2740 srf_update->flip_addr->flip_timestamp_in_us; 2741 surface->time.index++; 2742 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2743 surface->time.index = 0; 2744 2745 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2746 } 2747 2748 if (srf_update->scaling_info) { 2749 surface->scaling_quality = 2750 srf_update->scaling_info->scaling_quality; 2751 surface->dst_rect = 2752 srf_update->scaling_info->dst_rect; 2753 surface->src_rect = 2754 srf_update->scaling_info->src_rect; 2755 surface->clip_rect = 2756 srf_update->scaling_info->clip_rect; 2757 } 2758 2759 if (srf_update->plane_info) { 2760 surface->color_space = 2761 srf_update->plane_info->color_space; 2762 surface->format = 2763 srf_update->plane_info->format; 2764 surface->plane_size = 2765 srf_update->plane_info->plane_size; 2766 surface->rotation = 2767 srf_update->plane_info->rotation; 2768 surface->horizontal_mirror = 2769 srf_update->plane_info->horizontal_mirror; 2770 surface->stereo_format = 2771 srf_update->plane_info->stereo_format; 2772 surface->tiling_info = 2773 srf_update->plane_info->tiling_info; 2774 surface->visible = 2775 srf_update->plane_info->visible; 2776 surface->per_pixel_alpha = 2777 srf_update->plane_info->per_pixel_alpha; 2778 surface->global_alpha = 2779 srf_update->plane_info->global_alpha; 2780 surface->global_alpha_value = 2781 srf_update->plane_info->global_alpha_value; 2782 surface->dcc = 2783 srf_update->plane_info->dcc; 2784 surface->layer_index = 2785 srf_update->plane_info->layer_index; 2786 } 2787 2788 if (srf_update->gamma && 2789 (surface->gamma_correction != 2790 srf_update->gamma)) { 2791 memcpy(&surface->gamma_correction->entries, 2792 &srf_update->gamma->entries, 2793 sizeof(struct dc_gamma_entries)); 2794 surface->gamma_correction->is_identity = 2795 srf_update->gamma->is_identity; 2796 surface->gamma_correction->num_entries = 2797 srf_update->gamma->num_entries; 2798 surface->gamma_correction->type = 2799 srf_update->gamma->type; 2800 } 2801 2802 if (srf_update->in_transfer_func && 2803 (surface->in_transfer_func != 2804 srf_update->in_transfer_func)) { 2805 surface->in_transfer_func->sdr_ref_white_level = 2806 srf_update->in_transfer_func->sdr_ref_white_level; 2807 surface->in_transfer_func->tf = 2808 srf_update->in_transfer_func->tf; 2809 surface->in_transfer_func->type = 2810 srf_update->in_transfer_func->type; 2811 memcpy(&surface->in_transfer_func->tf_pts, 2812 &srf_update->in_transfer_func->tf_pts, 2813 sizeof(struct dc_transfer_func_distributed_points)); 2814 } 2815 2816 if (srf_update->func_shaper && 2817 (surface->in_shaper_func != 2818 srf_update->func_shaper)) 2819 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2820 sizeof(*surface->in_shaper_func)); 2821 2822 if (srf_update->lut3d_func && 2823 (surface->lut3d_func != 2824 srf_update->lut3d_func)) 2825 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2826 sizeof(*surface->lut3d_func)); 2827 2828 if (srf_update->hdr_mult.value) 2829 surface->hdr_mult = 2830 srf_update->hdr_mult; 2831 2832 if (srf_update->blend_tf && 2833 (surface->blend_tf != 2834 srf_update->blend_tf)) 2835 memcpy(surface->blend_tf, srf_update->blend_tf, 2836 sizeof(*surface->blend_tf)); 2837 2838 if (srf_update->input_csc_color_matrix) 2839 surface->input_csc_color_matrix = 2840 *srf_update->input_csc_color_matrix; 2841 2842 if (srf_update->coeff_reduction_factor) 2843 surface->coeff_reduction_factor = 2844 *srf_update->coeff_reduction_factor; 2845 2846 if (srf_update->gamut_remap_matrix) 2847 surface->gamut_remap_matrix = 2848 *srf_update->gamut_remap_matrix; 2849 } 2850 2851 static void copy_stream_update_to_stream(struct dc *dc, 2852 struct dc_state *context, 2853 struct dc_stream_state *stream, 2854 struct dc_stream_update *update) 2855 { 2856 struct dc_context *dc_ctx = dc->ctx; 2857 2858 if (update == NULL || stream == NULL) 2859 return; 2860 2861 if (update->src.height && update->src.width) 2862 stream->src = update->src; 2863 2864 if (update->dst.height && update->dst.width) 2865 stream->dst = update->dst; 2866 2867 if (update->out_transfer_func && 2868 stream->out_transfer_func != update->out_transfer_func) { 2869 stream->out_transfer_func->sdr_ref_white_level = 2870 update->out_transfer_func->sdr_ref_white_level; 2871 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2872 stream->out_transfer_func->type = 2873 update->out_transfer_func->type; 2874 memcpy(&stream->out_transfer_func->tf_pts, 2875 &update->out_transfer_func->tf_pts, 2876 sizeof(struct dc_transfer_func_distributed_points)); 2877 } 2878 2879 if (update->hdr_static_metadata) 2880 stream->hdr_static_metadata = *update->hdr_static_metadata; 2881 2882 if (update->abm_level) 2883 stream->abm_level = *update->abm_level; 2884 2885 if (update->periodic_interrupt) 2886 stream->periodic_interrupt = *update->periodic_interrupt; 2887 2888 if (update->gamut_remap) 2889 stream->gamut_remap_matrix = *update->gamut_remap; 2890 2891 /* Note: this being updated after mode set is currently not a use case 2892 * however if it arises OCSC would need to be reprogrammed at the 2893 * minimum 2894 */ 2895 if (update->output_color_space) 2896 stream->output_color_space = *update->output_color_space; 2897 2898 if (update->output_csc_transform) 2899 stream->csc_color_matrix = *update->output_csc_transform; 2900 2901 if (update->vrr_infopacket) 2902 stream->vrr_infopacket = *update->vrr_infopacket; 2903 2904 if (update->allow_freesync) 2905 stream->allow_freesync = *update->allow_freesync; 2906 2907 if (update->vrr_active_variable) 2908 stream->vrr_active_variable = *update->vrr_active_variable; 2909 2910 if (update->vrr_active_fixed) 2911 stream->vrr_active_fixed = *update->vrr_active_fixed; 2912 2913 if (update->crtc_timing_adjust) 2914 stream->adjust = *update->crtc_timing_adjust; 2915 2916 if (update->dpms_off) 2917 stream->dpms_off = *update->dpms_off; 2918 2919 if (update->hfvsif_infopacket) 2920 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2921 2922 if (update->vtem_infopacket) 2923 stream->vtem_infopacket = *update->vtem_infopacket; 2924 2925 if (update->vsc_infopacket) 2926 stream->vsc_infopacket = *update->vsc_infopacket; 2927 2928 if (update->vsp_infopacket) 2929 stream->vsp_infopacket = *update->vsp_infopacket; 2930 2931 if (update->adaptive_sync_infopacket) 2932 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; 2933 2934 if (update->dither_option) 2935 stream->dither_option = *update->dither_option; 2936 2937 if (update->pending_test_pattern) 2938 stream->test_pattern = *update->pending_test_pattern; 2939 /* update current stream with writeback info */ 2940 if (update->wb_update) { 2941 int i; 2942 2943 stream->num_wb_info = update->wb_update->num_wb_info; 2944 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2945 for (i = 0; i < stream->num_wb_info; i++) 2946 stream->writeback_info[i] = 2947 update->wb_update->writeback_info[i]; 2948 } 2949 if (update->dsc_config) { 2950 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2951 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2952 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2953 update->dsc_config->num_slices_v != 0); 2954 2955 /* Use temporarry context for validating new DSC config */ 2956 struct dc_state *dsc_validate_context = dc_create_state(dc); 2957 2958 if (dsc_validate_context) { 2959 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2960 2961 stream->timing.dsc_cfg = *update->dsc_config; 2962 stream->timing.flags.DSC = enable_dsc; 2963 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2964 stream->timing.dsc_cfg = old_dsc_cfg; 2965 stream->timing.flags.DSC = old_dsc_enabled; 2966 update->dsc_config = NULL; 2967 } 2968 2969 dc_release_state(dsc_validate_context); 2970 } else { 2971 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2972 update->dsc_config = NULL; 2973 } 2974 } 2975 } 2976 2977 static bool update_planes_and_stream_state(struct dc *dc, 2978 struct dc_surface_update *srf_updates, int surface_count, 2979 struct dc_stream_state *stream, 2980 struct dc_stream_update *stream_update, 2981 enum surface_update_type *new_update_type, 2982 struct dc_state **new_context) 2983 { 2984 struct dc_state *context; 2985 int i, j; 2986 enum surface_update_type update_type; 2987 const struct dc_stream_status *stream_status; 2988 struct dc_context *dc_ctx = dc->ctx; 2989 2990 stream_status = dc_stream_get_status(stream); 2991 2992 if (!stream_status) { 2993 if (surface_count) /* Only an error condition if surf_count non-zero*/ 2994 ASSERT(false); 2995 2996 return false; /* Cannot commit surface to stream that is not committed */ 2997 } 2998 2999 context = dc->current_state; 3000 3001 update_type = dc_check_update_surfaces_for_stream( 3002 dc, srf_updates, surface_count, stream_update, stream_status); 3003 3004 /* update current stream with the new updates */ 3005 copy_stream_update_to_stream(dc, context, stream, stream_update); 3006 3007 /* do not perform surface update if surface has invalid dimensions 3008 * (all zero) and no scaling_info is provided 3009 */ 3010 if (surface_count > 0) { 3011 for (i = 0; i < surface_count; i++) { 3012 if ((srf_updates[i].surface->src_rect.width == 0 || 3013 srf_updates[i].surface->src_rect.height == 0 || 3014 srf_updates[i].surface->dst_rect.width == 0 || 3015 srf_updates[i].surface->dst_rect.height == 0) && 3016 (!srf_updates[i].scaling_info || 3017 srf_updates[i].scaling_info->src_rect.width == 0 || 3018 srf_updates[i].scaling_info->src_rect.height == 0 || 3019 srf_updates[i].scaling_info->dst_rect.width == 0 || 3020 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3021 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3022 return false; 3023 } 3024 } 3025 } 3026 3027 if (update_type >= update_surface_trace_level) 3028 update_surface_trace(dc, srf_updates, surface_count); 3029 3030 if (update_type >= UPDATE_TYPE_FULL) { 3031 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3032 3033 for (i = 0; i < surface_count; i++) 3034 new_planes[i] = srf_updates[i].surface; 3035 3036 /* initialize scratch memory for building context */ 3037 context = dc_create_state(dc); 3038 if (context == NULL) { 3039 DC_ERROR("Failed to allocate new validate context!\n"); 3040 return false; 3041 } 3042 3043 dc_resource_state_copy_construct( 3044 dc->current_state, context); 3045 3046 /* For each full update, remove all existing phantom pipes first. 3047 * Ensures that we have enough pipes for newly added MPO planes 3048 */ 3049 if (dc->res_pool->funcs->remove_phantom_pipes) 3050 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3051 3052 /*remove old surfaces from context */ 3053 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3054 3055 BREAK_TO_DEBUGGER(); 3056 goto fail; 3057 } 3058 3059 /* add surface to context */ 3060 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3061 3062 BREAK_TO_DEBUGGER(); 3063 goto fail; 3064 } 3065 } 3066 3067 /* save update parameters into surface */ 3068 for (i = 0; i < surface_count; i++) { 3069 struct dc_plane_state *surface = srf_updates[i].surface; 3070 3071 copy_surface_update_to_plane(surface, &srf_updates[i]); 3072 3073 if (update_type >= UPDATE_TYPE_MED) { 3074 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3075 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3076 3077 if (pipe_ctx->plane_state != surface) 3078 continue; 3079 3080 resource_build_scaling_params(pipe_ctx); 3081 } 3082 } 3083 } 3084 3085 if (update_type == UPDATE_TYPE_FULL) { 3086 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3087 /* For phantom pipes we remove and create a new set of phantom pipes 3088 * for each full update (because we don't know if we'll need phantom 3089 * pipes until after the first round of validation). However, if validation 3090 * fails we need to keep the existing phantom pipes (because we don't update 3091 * the dc->current_state). 3092 * 3093 * The phantom stream/plane refcount is decremented for validation because 3094 * we assume it'll be removed (the free comes when the dc_state is freed), 3095 * but if validation fails we have to increment back the refcount so it's 3096 * consistent. 3097 */ 3098 if (dc->res_pool->funcs->retain_phantom_pipes) 3099 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); 3100 BREAK_TO_DEBUGGER(); 3101 goto fail; 3102 } 3103 } 3104 3105 *new_context = context; 3106 *new_update_type = update_type; 3107 3108 return true; 3109 3110 fail: 3111 dc_release_state(context); 3112 3113 return false; 3114 3115 } 3116 3117 static void commit_planes_do_stream_update(struct dc *dc, 3118 struct dc_stream_state *stream, 3119 struct dc_stream_update *stream_update, 3120 enum surface_update_type update_type, 3121 struct dc_state *context) 3122 { 3123 int j; 3124 3125 // Stream updates 3126 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3127 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3128 3129 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) { 3130 3131 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3132 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3133 3134 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3135 stream_update->vrr_infopacket || 3136 stream_update->vsc_infopacket || 3137 stream_update->vsp_infopacket || 3138 stream_update->hfvsif_infopacket || 3139 stream_update->adaptive_sync_infopacket || 3140 stream_update->vtem_infopacket) { 3141 resource_build_info_frame(pipe_ctx); 3142 dc->hwss.update_info_frame(pipe_ctx); 3143 3144 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3145 dc->link_srv->dp_trace_source_sequence( 3146 pipe_ctx->stream->link, 3147 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3148 } 3149 3150 if (stream_update->hdr_static_metadata && 3151 stream->use_dynamic_meta && 3152 dc->hwss.set_dmdata_attributes && 3153 pipe_ctx->stream->dmdata_address.quad_part != 0) 3154 dc->hwss.set_dmdata_attributes(pipe_ctx); 3155 3156 if (stream_update->gamut_remap) 3157 dc_stream_set_gamut_remap(dc, stream); 3158 3159 if (stream_update->output_csc_transform) 3160 dc_stream_program_csc_matrix(dc, stream); 3161 3162 if (stream_update->dither_option) { 3163 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3164 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3165 &pipe_ctx->stream->bit_depth_params); 3166 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3167 &stream->bit_depth_params, 3168 &stream->clamping); 3169 while (odm_pipe) { 3170 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3171 &stream->bit_depth_params, 3172 &stream->clamping); 3173 odm_pipe = odm_pipe->next_odm_pipe; 3174 } 3175 } 3176 3177 3178 /* Full fe update*/ 3179 if (update_type == UPDATE_TYPE_FAST) 3180 continue; 3181 3182 if (stream_update->dsc_config) 3183 dc->link_srv->update_dsc_config(pipe_ctx); 3184 3185 if (stream_update->mst_bw_update) { 3186 if (stream_update->mst_bw_update->is_increase) 3187 dc->link_srv->increase_mst_payload(pipe_ctx, 3188 stream_update->mst_bw_update->mst_stream_bw); 3189 else 3190 dc->link_srv->reduce_mst_payload(pipe_ctx, 3191 stream_update->mst_bw_update->mst_stream_bw); 3192 } 3193 3194 if (stream_update->pending_test_pattern) { 3195 dc_link_dp_set_test_pattern(stream->link, 3196 stream->test_pattern.type, 3197 stream->test_pattern.color_space, 3198 stream->test_pattern.p_link_settings, 3199 stream->test_pattern.p_custom_pattern, 3200 stream->test_pattern.cust_pattern_size); 3201 } 3202 3203 if (stream_update->dpms_off) { 3204 if (*stream_update->dpms_off) { 3205 dc->link_srv->set_dpms_off(pipe_ctx); 3206 /* for dpms, keep acquired resources*/ 3207 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3208 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3209 3210 dc->optimized_required = true; 3211 3212 } else { 3213 if (get_seamless_boot_stream_count(context) == 0) 3214 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3215 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3216 } 3217 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space 3218 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) { 3219 /* 3220 * Workaround for firmware issue in some receivers where they don't pick up 3221 * correct output color space unless DP link is disabled/re-enabled 3222 */ 3223 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3224 } 3225 3226 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3227 bool should_program_abm = true; 3228 3229 // if otg funcs defined check if blanked before programming 3230 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3231 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3232 should_program_abm = false; 3233 3234 if (should_program_abm) { 3235 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3236 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3237 } else { 3238 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3239 pipe_ctx->stream_res.abm, stream->abm_level); 3240 } 3241 } 3242 } 3243 } 3244 } 3245 } 3246 3247 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3248 { 3249 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3250 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3251 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3252 return true; 3253 3254 if (stream->link->replay_settings.config.replay_supported) 3255 return true; 3256 3257 return false; 3258 } 3259 3260 void dc_dmub_update_dirty_rect(struct dc *dc, 3261 int surface_count, 3262 struct dc_stream_state *stream, 3263 struct dc_surface_update *srf_updates, 3264 struct dc_state *context) 3265 { 3266 union dmub_rb_cmd cmd; 3267 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3268 unsigned int i, j; 3269 unsigned int panel_inst = 0; 3270 3271 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3272 return; 3273 3274 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3275 return; 3276 3277 memset(&cmd, 0x0, sizeof(cmd)); 3278 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3279 cmd.update_dirty_rect.header.sub_type = 0; 3280 cmd.update_dirty_rect.header.payload_bytes = 3281 sizeof(cmd.update_dirty_rect) - 3282 sizeof(cmd.update_dirty_rect.header); 3283 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3284 for (i = 0; i < surface_count; i++) { 3285 struct dc_plane_state *plane_state = srf_updates[i].surface; 3286 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3287 3288 if (!srf_updates[i].surface || !flip_addr) 3289 continue; 3290 /* Do not send in immediate flip mode */ 3291 if (srf_updates[i].surface->flip_immediate) 3292 continue; 3293 3294 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3295 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3296 sizeof(flip_addr->dirty_rects)); 3297 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3298 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3299 3300 if (pipe_ctx->stream != stream) 3301 continue; 3302 if (pipe_ctx->plane_state != plane_state) 3303 continue; 3304 3305 update_dirty_rect->panel_inst = panel_inst; 3306 update_dirty_rect->pipe_idx = j; 3307 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3308 } 3309 } 3310 } 3311 3312 static void build_dmub_update_dirty_rect( 3313 struct dc *dc, 3314 int surface_count, 3315 struct dc_stream_state *stream, 3316 struct dc_surface_update *srf_updates, 3317 struct dc_state *context, 3318 struct dc_dmub_cmd dc_dmub_cmd[], 3319 unsigned int *dmub_cmd_count) 3320 { 3321 union dmub_rb_cmd cmd; 3322 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3323 unsigned int i, j; 3324 unsigned int panel_inst = 0; 3325 3326 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3327 return; 3328 3329 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3330 return; 3331 3332 memset(&cmd, 0x0, sizeof(cmd)); 3333 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3334 cmd.update_dirty_rect.header.sub_type = 0; 3335 cmd.update_dirty_rect.header.payload_bytes = 3336 sizeof(cmd.update_dirty_rect) - 3337 sizeof(cmd.update_dirty_rect.header); 3338 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3339 for (i = 0; i < surface_count; i++) { 3340 struct dc_plane_state *plane_state = srf_updates[i].surface; 3341 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3342 3343 if (!srf_updates[i].surface || !flip_addr) 3344 continue; 3345 /* Do not send in immediate flip mode */ 3346 if (srf_updates[i].surface->flip_immediate) 3347 continue; 3348 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3349 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3350 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3351 sizeof(flip_addr->dirty_rects)); 3352 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3353 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3354 3355 if (pipe_ctx->stream != stream) 3356 continue; 3357 if (pipe_ctx->plane_state != plane_state) 3358 continue; 3359 update_dirty_rect->panel_inst = panel_inst; 3360 update_dirty_rect->pipe_idx = j; 3361 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd; 3362 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; 3363 (*dmub_cmd_count)++; 3364 } 3365 } 3366 } 3367 3368 3369 /** 3370 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB 3371 * 3372 * @dc: Current DC state 3373 * @srf_updates: Array of surface updates 3374 * @surface_count: Number of surfaces that have an updated 3375 * @stream: Corresponding stream to be updated in the current flip 3376 * @context: New DC state to be programmed 3377 * 3378 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB 3379 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array 3380 * 3381 * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required 3382 * to build an array of commands and have them sent while the OTG lock is acquired. 3383 * 3384 * Return: void 3385 */ 3386 static void build_dmub_cmd_list(struct dc *dc, 3387 struct dc_surface_update *srf_updates, 3388 int surface_count, 3389 struct dc_stream_state *stream, 3390 struct dc_state *context, 3391 struct dc_dmub_cmd dc_dmub_cmd[], 3392 unsigned int *dmub_cmd_count) 3393 { 3394 // Initialize cmd count to 0 3395 *dmub_cmd_count = 0; 3396 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count); 3397 } 3398 3399 static void commit_planes_for_stream_fast(struct dc *dc, 3400 struct dc_surface_update *srf_updates, 3401 int surface_count, 3402 struct dc_stream_state *stream, 3403 struct dc_stream_update *stream_update, 3404 enum surface_update_type update_type, 3405 struct dc_state *context) 3406 { 3407 int i, j; 3408 struct pipe_ctx *top_pipe_to_program = NULL; 3409 dc_z10_restore(dc); 3410 3411 top_pipe_to_program = resource_get_otg_master_for_stream( 3412 &context->res_ctx, 3413 stream); 3414 3415 if (dc->debug.visual_confirm) { 3416 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3417 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3418 3419 if (pipe->stream && pipe->plane_state) 3420 dc_update_viusal_confirm_color(dc, context, pipe); 3421 } 3422 } 3423 3424 for (i = 0; i < surface_count; i++) { 3425 struct dc_plane_state *plane_state = srf_updates[i].surface; 3426 /*set logical flag for lock/unlock use*/ 3427 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3428 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3429 3430 if (!pipe_ctx->plane_state) 3431 continue; 3432 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3433 continue; 3434 pipe_ctx->plane_state->triplebuffer_flips = false; 3435 if (update_type == UPDATE_TYPE_FAST && 3436 dc->hwss.program_triplebuffer && 3437 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3438 /*triple buffer for VUpdate only*/ 3439 pipe_ctx->plane_state->triplebuffer_flips = true; 3440 } 3441 } 3442 } 3443 3444 build_dmub_cmd_list(dc, 3445 srf_updates, 3446 surface_count, 3447 stream, 3448 context, 3449 context->dc_dmub_cmd, 3450 &(context->dmub_cmd_count)); 3451 hwss_build_fast_sequence(dc, 3452 context->dc_dmub_cmd, 3453 context->dmub_cmd_count, 3454 context->block_sequence, 3455 &(context->block_sequence_steps), 3456 top_pipe_to_program); 3457 hwss_execute_sequence(dc, 3458 context->block_sequence, 3459 context->block_sequence_steps); 3460 /* Clear update flags so next flip doesn't have redundant programming 3461 * (if there's no stream update, the update flags are not cleared). 3462 * Surface updates are cleared unconditionally at the beginning of each flip, 3463 * so no need to clear here. 3464 */ 3465 if (top_pipe_to_program->stream) 3466 top_pipe_to_program->stream->update_flags.raw = 0; 3467 } 3468 3469 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context) 3470 { 3471 /* 3472 * This function calls HWSS to wait for any potentially double buffered 3473 * operations to complete. It should be invoked as a pre-amble prior 3474 * to full update programming before asserting any HW locks. 3475 */ 3476 int pipe_idx; 3477 int opp_inst; 3478 int opp_count = dc->res_pool->pipe_count; 3479 struct hubp *hubp; 3480 int mpcc_inst; 3481 const struct pipe_ctx *pipe_ctx; 3482 3483 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { 3484 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx]; 3485 3486 if (!pipe_ctx->stream) 3487 continue; 3488 3489 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) 3490 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); 3491 3492 hubp = pipe_ctx->plane_res.hubp; 3493 if (!hubp) 3494 continue; 3495 3496 mpcc_inst = hubp->inst; 3497 // MPCC inst is equal to pipe index in practice 3498 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { 3499 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { 3500 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); 3501 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false; 3502 break; 3503 } 3504 } 3505 } 3506 } 3507 3508 static void commit_planes_for_stream(struct dc *dc, 3509 struct dc_surface_update *srf_updates, 3510 int surface_count, 3511 struct dc_stream_state *stream, 3512 struct dc_stream_update *stream_update, 3513 enum surface_update_type update_type, 3514 struct dc_state *context) 3515 { 3516 int i, j; 3517 struct pipe_ctx *top_pipe_to_program = NULL; 3518 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3519 bool subvp_prev_use = false; 3520 bool subvp_curr_use = false; 3521 3522 // Once we apply the new subvp context to hardware it won't be in the 3523 // dc->current_state anymore, so we have to cache it before we apply 3524 // the new SubVP context 3525 subvp_prev_use = false; 3526 dc_z10_restore(dc); 3527 if (update_type == UPDATE_TYPE_FULL) 3528 wait_for_outstanding_hw_updates(dc, context); 3529 3530 if (update_type == UPDATE_TYPE_FULL) { 3531 dc_allow_idle_optimizations(dc, false); 3532 3533 if (get_seamless_boot_stream_count(context) == 0) 3534 dc->hwss.prepare_bandwidth(dc, context); 3535 3536 if (dc->hwss.update_dsc_pg) 3537 dc->hwss.update_dsc_pg(dc, context, false); 3538 3539 context_clock_trace(dc, context); 3540 } 3541 3542 top_pipe_to_program = resource_get_otg_master_for_stream( 3543 &context->res_ctx, 3544 stream); 3545 3546 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3547 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3548 3549 // Check old context for SubVP 3550 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3551 if (subvp_prev_use) 3552 break; 3553 } 3554 3555 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3556 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3557 3558 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3559 subvp_curr_use = true; 3560 break; 3561 } 3562 } 3563 3564 if (dc->debug.visual_confirm) 3565 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3566 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3567 3568 if (pipe->stream && pipe->plane_state) 3569 dc_update_viusal_confirm_color(dc, context, pipe); 3570 } 3571 3572 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3573 struct pipe_ctx *mpcc_pipe; 3574 struct pipe_ctx *odm_pipe; 3575 3576 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3577 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3578 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3579 } 3580 3581 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3582 if (top_pipe_to_program && 3583 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3584 if (should_use_dmub_lock(stream->link)) { 3585 union dmub_hw_lock_flags hw_locks = { 0 }; 3586 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3587 3588 hw_locks.bits.lock_dig = 1; 3589 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3590 3591 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3592 true, 3593 &hw_locks, 3594 &inst_flags); 3595 } else 3596 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3597 top_pipe_to_program->stream_res.tg); 3598 } 3599 3600 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3601 if (dc->hwss.subvp_pipe_control_lock) 3602 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3603 dc->hwss.interdependent_update_lock(dc, context, true); 3604 3605 } else { 3606 if (dc->hwss.subvp_pipe_control_lock) 3607 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3608 /* Lock the top pipe while updating plane addrs, since freesync requires 3609 * plane addr update event triggers to be synchronized. 3610 * top_pipe_to_program is expected to never be NULL 3611 */ 3612 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3613 } 3614 3615 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3616 3617 // Stream updates 3618 if (stream_update) 3619 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3620 3621 if (surface_count == 0) { 3622 /* 3623 * In case of turning off screen, no need to program front end a second time. 3624 * just return after program blank. 3625 */ 3626 if (dc->hwss.apply_ctx_for_surface) 3627 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3628 if (dc->hwss.program_front_end_for_ctx) 3629 dc->hwss.program_front_end_for_ctx(dc, context); 3630 3631 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3632 dc->hwss.interdependent_update_lock(dc, context, false); 3633 } else { 3634 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3635 } 3636 dc->hwss.post_unlock_program_front_end(dc, context); 3637 3638 if (update_type != UPDATE_TYPE_FAST) 3639 if (dc->hwss.commit_subvp_config) 3640 dc->hwss.commit_subvp_config(dc, context); 3641 3642 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3643 * move the SubVP lock to after the phantom pipes have been setup 3644 */ 3645 if (dc->hwss.subvp_pipe_control_lock) 3646 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 3647 NULL, subvp_prev_use); 3648 return; 3649 } 3650 3651 if (update_type != UPDATE_TYPE_FAST) { 3652 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3653 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3654 3655 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP || 3656 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) && 3657 pipe_ctx->stream && pipe_ctx->plane_state) { 3658 /* Only update visual confirm for SUBVP and Mclk switching here. 3659 * The bar appears on all pipes, so we need to update the bar on all displays, 3660 * so the information doesn't get stale. 3661 */ 3662 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, 3663 pipe_ctx->plane_res.hubp->inst); 3664 } 3665 } 3666 } 3667 3668 for (i = 0; i < surface_count; i++) { 3669 struct dc_plane_state *plane_state = srf_updates[i].surface; 3670 /*set logical flag for lock/unlock use*/ 3671 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3672 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3673 if (!pipe_ctx->plane_state) 3674 continue; 3675 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3676 continue; 3677 pipe_ctx->plane_state->triplebuffer_flips = false; 3678 if (update_type == UPDATE_TYPE_FAST && 3679 dc->hwss.program_triplebuffer != NULL && 3680 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3681 /*triple buffer for VUpdate only*/ 3682 pipe_ctx->plane_state->triplebuffer_flips = true; 3683 } 3684 } 3685 if (update_type == UPDATE_TYPE_FULL) { 3686 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3687 plane_state->flip_immediate = false; 3688 } 3689 } 3690 3691 // Update Type FULL, Surface updates 3692 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3693 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3694 3695 if (!pipe_ctx->top_pipe && 3696 !pipe_ctx->prev_odm_pipe && 3697 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3698 struct dc_stream_status *stream_status = NULL; 3699 3700 if (!pipe_ctx->plane_state) 3701 continue; 3702 3703 /* Full fe update*/ 3704 if (update_type == UPDATE_TYPE_FAST) 3705 continue; 3706 3707 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3708 3709 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3710 /*turn off triple buffer for full update*/ 3711 dc->hwss.program_triplebuffer( 3712 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3713 } 3714 stream_status = 3715 stream_get_status(context, pipe_ctx->stream); 3716 3717 if (dc->hwss.apply_ctx_for_surface) 3718 dc->hwss.apply_ctx_for_surface( 3719 dc, pipe_ctx->stream, stream_status->plane_count, context); 3720 } 3721 } 3722 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3723 dc->hwss.program_front_end_for_ctx(dc, context); 3724 if (dc->debug.validate_dml_output) { 3725 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3726 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3727 if (cur_pipe->stream == NULL) 3728 continue; 3729 3730 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3731 cur_pipe->plane_res.hubp, dc->ctx, 3732 &context->res_ctx.pipe_ctx[i].rq_regs, 3733 &context->res_ctx.pipe_ctx[i].dlg_regs, 3734 &context->res_ctx.pipe_ctx[i].ttu_regs); 3735 } 3736 } 3737 } 3738 3739 // Update Type FAST, Surface updates 3740 if (update_type == UPDATE_TYPE_FAST) { 3741 if (dc->hwss.set_flip_control_gsl) 3742 for (i = 0; i < surface_count; i++) { 3743 struct dc_plane_state *plane_state = srf_updates[i].surface; 3744 3745 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3746 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3747 3748 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3749 continue; 3750 3751 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3752 continue; 3753 3754 // GSL has to be used for flip immediate 3755 dc->hwss.set_flip_control_gsl(pipe_ctx, 3756 pipe_ctx->plane_state->flip_immediate); 3757 } 3758 } 3759 3760 /* Perform requested Updates */ 3761 for (i = 0; i < surface_count; i++) { 3762 struct dc_plane_state *plane_state = srf_updates[i].surface; 3763 3764 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3765 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3766 3767 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3768 continue; 3769 3770 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3771 continue; 3772 3773 /*program triple buffer after lock based on flip type*/ 3774 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3775 /*only enable triplebuffer for fast_update*/ 3776 dc->hwss.program_triplebuffer( 3777 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3778 } 3779 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3780 dc->hwss.update_plane_addr(dc, pipe_ctx); 3781 } 3782 } 3783 } 3784 3785 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3786 dc->hwss.interdependent_update_lock(dc, context, false); 3787 } else { 3788 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3789 } 3790 3791 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3792 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3793 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3794 top_pipe_to_program->stream_res.tg, 3795 CRTC_STATE_VACTIVE); 3796 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3797 top_pipe_to_program->stream_res.tg, 3798 CRTC_STATE_VBLANK); 3799 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3800 top_pipe_to_program->stream_res.tg, 3801 CRTC_STATE_VACTIVE); 3802 3803 if (should_use_dmub_lock(stream->link)) { 3804 union dmub_hw_lock_flags hw_locks = { 0 }; 3805 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3806 3807 hw_locks.bits.lock_dig = 1; 3808 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3809 3810 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3811 false, 3812 &hw_locks, 3813 &inst_flags); 3814 } else 3815 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3816 top_pipe_to_program->stream_res.tg); 3817 } 3818 3819 if (subvp_curr_use) { 3820 /* If enabling subvp or transitioning from subvp->subvp, enable the 3821 * phantom streams before we program front end for the phantom pipes. 3822 */ 3823 if (update_type != UPDATE_TYPE_FAST) { 3824 if (dc->hwss.enable_phantom_streams) 3825 dc->hwss.enable_phantom_streams(dc, context); 3826 } 3827 } 3828 3829 if (update_type != UPDATE_TYPE_FAST) 3830 dc->hwss.post_unlock_program_front_end(dc, context); 3831 3832 if (subvp_prev_use && !subvp_curr_use) { 3833 /* If disabling subvp, disable phantom streams after front end 3834 * programming has completed (we turn on phantom OTG in order 3835 * to complete the plane disable for phantom pipes). 3836 */ 3837 dc->hwss.apply_ctx_to_hw(dc, context); 3838 } 3839 3840 if (update_type != UPDATE_TYPE_FAST) 3841 if (dc->hwss.commit_subvp_config) 3842 dc->hwss.commit_subvp_config(dc, context); 3843 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3844 * move the SubVP lock to after the phantom pipes have been setup 3845 */ 3846 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3847 if (dc->hwss.subvp_pipe_control_lock) 3848 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 3849 } else { 3850 if (dc->hwss.subvp_pipe_control_lock) 3851 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3852 } 3853 3854 // Fire manual trigger only when bottom plane is flipped 3855 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3856 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3857 3858 if (!pipe_ctx->plane_state) 3859 continue; 3860 3861 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 3862 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 3863 !pipe_ctx->plane_state->update_flags.bits.addr_update || 3864 pipe_ctx->plane_state->skip_manual_trigger) 3865 continue; 3866 3867 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 3868 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 3869 } 3870 } 3871 3872 /** 3873 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change 3874 * 3875 * @dc: Used to get the current state status 3876 * @stream: Target stream, which we want to remove the attached planes 3877 * @surface_count: Number of surface update 3878 * @is_plane_addition: [in] Fill out with true if it is a plane addition case 3879 * 3880 * DCN32x and newer support a feature named Dynamic ODM which can conflict with 3881 * the MPO if used simultaneously in some specific configurations (e.g., 3882 * 4k@144). This function checks if the incoming context requires applying a 3883 * transition state with unnecessary pipe splitting and ODM disabled to 3884 * circumvent our hardware limitations to prevent this edge case. If the OPP 3885 * associated with an MPCC might change due to plane additions, this function 3886 * returns true. 3887 * 3888 * Return: 3889 * Return true if OPP and MPCC might change, otherwise, return false. 3890 */ 3891 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc, 3892 struct dc_stream_state *stream, 3893 int surface_count, 3894 bool *is_plane_addition) 3895 { 3896 3897 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 3898 bool force_minimal_pipe_splitting = false; 3899 bool subvp_active = false; 3900 uint32_t i; 3901 3902 *is_plane_addition = false; 3903 3904 if (cur_stream_status && 3905 dc->current_state->stream_count > 0 && 3906 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) { 3907 /* determine if minimal transition is required due to MPC*/ 3908 if (surface_count > 0) { 3909 if (cur_stream_status->plane_count > surface_count) { 3910 force_minimal_pipe_splitting = true; 3911 } else if (cur_stream_status->plane_count < surface_count) { 3912 force_minimal_pipe_splitting = true; 3913 *is_plane_addition = true; 3914 } 3915 } 3916 } 3917 3918 if (cur_stream_status && 3919 dc->current_state->stream_count == 1 && 3920 dc->debug.enable_single_display_2to1_odm_policy) { 3921 /* determine if minimal transition is required due to dynamic ODM*/ 3922 if (surface_count > 0) { 3923 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) { 3924 force_minimal_pipe_splitting = true; 3925 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) { 3926 force_minimal_pipe_splitting = true; 3927 *is_plane_addition = true; 3928 } 3929 } 3930 } 3931 3932 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3933 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3934 3935 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 3936 subvp_active = true; 3937 break; 3938 } 3939 } 3940 3941 /* For SubVP when adding or removing planes we need to add a minimal transition 3942 * (even when disabling all planes). Whenever disabling a phantom pipe, we 3943 * must use the minimal transition path to disable the pipe correctly. 3944 * 3945 * We want to use the minimal transition whenever subvp is active, not only if 3946 * a plane is being added / removed from a subvp stream (MPO plane can be added 3947 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through 3948 * a min transition to disable subvp. 3949 */ 3950 if (cur_stream_status && subvp_active) { 3951 /* determine if minimal transition is required due to SubVP*/ 3952 if (cur_stream_status->plane_count > surface_count) { 3953 force_minimal_pipe_splitting = true; 3954 } else if (cur_stream_status->plane_count < surface_count) { 3955 force_minimal_pipe_splitting = true; 3956 *is_plane_addition = true; 3957 } 3958 } 3959 3960 return force_minimal_pipe_splitting; 3961 } 3962 3963 /** 3964 * commit_minimal_transition_state - Create a transition pipe split state 3965 * 3966 * @dc: Used to get the current state status 3967 * @transition_base_context: New transition state 3968 * 3969 * In some specific configurations, such as pipe split on multi-display with 3970 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe 3971 * programming when moving to new planes. To mitigate those types of problems, 3972 * this function adds a transition state that minimizes pipe usage before 3973 * programming the new configuration. When adding a new plane, the current 3974 * state requires the least pipes, so it is applied without splitting. When 3975 * removing a plane, the new state requires the least pipes, so it is applied 3976 * without splitting. 3977 * 3978 * Return: 3979 * Return false if something is wrong in the transition state. 3980 */ 3981 static bool commit_minimal_transition_state(struct dc *dc, 3982 struct dc_state *transition_base_context) 3983 { 3984 struct dc_state *transition_context = dc_create_state(dc); 3985 enum pipe_split_policy tmp_mpc_policy = 0; 3986 bool temp_dynamic_odm_policy = 0; 3987 bool temp_subvp_policy = 0; 3988 enum dc_status ret = DC_ERROR_UNEXPECTED; 3989 unsigned int i, j; 3990 unsigned int pipe_in_use = 0; 3991 bool subvp_in_use = false; 3992 bool odm_in_use = false; 3993 3994 if (!transition_context) 3995 return false; 3996 /* Setup: 3997 * Store the current ODM and MPC config in some temp variables to be 3998 * restored after we commit the transition state. 3999 */ 4000 4001 /* check current pipes in use*/ 4002 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4003 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i]; 4004 4005 if (pipe->plane_state) 4006 pipe_in_use++; 4007 } 4008 4009 /* If SubVP is enabled and we are adding or removing planes from any main subvp 4010 * pipe, we must use the minimal transition. 4011 */ 4012 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4013 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4014 4015 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 4016 subvp_in_use = true; 4017 break; 4018 } 4019 } 4020 4021 /* If ODM is enabled and we are adding or removing planes from any ODM 4022 * pipe, we must use the minimal transition. 4023 */ 4024 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4025 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4026 4027 if (pipe->stream && pipe->next_odm_pipe) { 4028 odm_in_use = true; 4029 break; 4030 } 4031 } 4032 4033 /* When the OS add a new surface if we have been used all of pipes with odm combine 4034 * and mpc split feature, it need use commit_minimal_transition_state to transition safely. 4035 * After OS exit MPO, it will back to use odm and mpc split with all of pipes, we need 4036 * call it again. Otherwise return true to skip. 4037 * 4038 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially 4039 * enter/exit MPO when DCN still have enough resources. 4040 */ 4041 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) { 4042 dc_release_state(transition_context); 4043 return true; 4044 } 4045 4046 if (!dc->config.is_vmin_only_asic) { 4047 tmp_mpc_policy = dc->debug.pipe_split_policy; 4048 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 4049 } 4050 4051 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; 4052 dc->debug.enable_single_display_2to1_odm_policy = false; 4053 4054 temp_subvp_policy = dc->debug.force_disable_subvp; 4055 dc->debug.force_disable_subvp = true; 4056 4057 dc_resource_state_copy_construct(transition_base_context, transition_context); 4058 4059 /* commit minimal state */ 4060 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) { 4061 for (i = 0; i < transition_context->stream_count; i++) { 4062 struct dc_stream_status *stream_status = &transition_context->stream_status[i]; 4063 4064 for (j = 0; j < stream_status->plane_count; j++) { 4065 struct dc_plane_state *plane_state = stream_status->plane_states[j]; 4066 4067 /* force vsync flip when reconfiguring pipes to prevent underflow 4068 * and corruption 4069 */ 4070 plane_state->flip_immediate = false; 4071 } 4072 } 4073 4074 ret = dc_commit_state_no_check(dc, transition_context); 4075 } 4076 4077 /* always release as dc_commit_state_no_check retains in good case */ 4078 dc_release_state(transition_context); 4079 4080 /* TearDown: 4081 * Restore original configuration for ODM and MPO. 4082 */ 4083 if (!dc->config.is_vmin_only_asic) 4084 dc->debug.pipe_split_policy = tmp_mpc_policy; 4085 4086 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy; 4087 dc->debug.force_disable_subvp = temp_subvp_policy; 4088 4089 if (ret != DC_OK) { 4090 /* this should never happen */ 4091 BREAK_TO_DEBUGGER(); 4092 return false; 4093 } 4094 4095 /* force full surface update */ 4096 for (i = 0; i < dc->current_state->stream_count; i++) { 4097 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) { 4098 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF; 4099 } 4100 } 4101 4102 return true; 4103 } 4104 4105 /** 4106 * update_seamless_boot_flags() - Helper function for updating seamless boot flags 4107 * 4108 * @dc: Current DC state 4109 * @context: New DC state to be programmed 4110 * @surface_count: Number of surfaces that have an updated 4111 * @stream: Corresponding stream to be updated in the current flip 4112 * 4113 * Updating seamless boot flags do not need to be part of the commit sequence. This 4114 * helper function will update the seamless boot flags on each flip (if required) 4115 * outside of the HW commit sequence (fast or slow). 4116 * 4117 * Return: void 4118 */ 4119 static void update_seamless_boot_flags(struct dc *dc, 4120 struct dc_state *context, 4121 int surface_count, 4122 struct dc_stream_state *stream) 4123 { 4124 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 4125 /* Optimize seamless boot flag keeps clocks and watermarks high until 4126 * first flip. After first flip, optimization is required to lower 4127 * bandwidth. Important to note that it is expected UEFI will 4128 * only light up a single display on POST, therefore we only expect 4129 * one stream with seamless boot flag set. 4130 */ 4131 if (stream->apply_seamless_boot_optimization) { 4132 stream->apply_seamless_boot_optimization = false; 4133 4134 if (get_seamless_boot_stream_count(context) == 0) 4135 dc->optimized_required = true; 4136 } 4137 } 4138 } 4139 4140 static void populate_fast_updates(struct dc_fast_update *fast_update, 4141 struct dc_surface_update *srf_updates, 4142 int surface_count, 4143 struct dc_stream_update *stream_update) 4144 { 4145 int i = 0; 4146 4147 if (stream_update) { 4148 fast_update[0].out_transfer_func = stream_update->out_transfer_func; 4149 fast_update[0].output_csc_transform = stream_update->output_csc_transform; 4150 } 4151 4152 for (i = 0; i < surface_count; i++) { 4153 fast_update[i].flip_addr = srf_updates[i].flip_addr; 4154 fast_update[i].gamma = srf_updates[i].gamma; 4155 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 4156 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 4157 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 4158 } 4159 } 4160 4161 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) 4162 { 4163 int i; 4164 4165 if (fast_update[0].out_transfer_func || 4166 fast_update[0].output_csc_transform) 4167 return true; 4168 4169 for (i = 0; i < surface_count; i++) { 4170 if (fast_update[i].flip_addr || 4171 fast_update[i].gamma || 4172 fast_update[i].gamut_remap_matrix || 4173 fast_update[i].input_csc_color_matrix || 4174 fast_update[i].coeff_reduction_factor) 4175 return true; 4176 } 4177 4178 return false; 4179 } 4180 4181 static bool full_update_required(struct dc *dc, 4182 struct dc_surface_update *srf_updates, 4183 int surface_count, 4184 struct dc_stream_update *stream_update, 4185 struct dc_stream_state *stream) 4186 { 4187 4188 int i; 4189 struct dc_stream_status *stream_status; 4190 const struct dc_state *context = dc->current_state; 4191 4192 for (i = 0; i < surface_count; i++) { 4193 if (srf_updates && 4194 (srf_updates[i].plane_info || 4195 srf_updates[i].scaling_info || 4196 (srf_updates[i].hdr_mult.value && 4197 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 4198 srf_updates[i].in_transfer_func || 4199 srf_updates[i].func_shaper || 4200 srf_updates[i].lut3d_func || 4201 srf_updates[i].blend_tf || 4202 srf_updates[i].surface->force_full_update || 4203 (srf_updates[i].flip_addr && 4204 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 4205 !is_surface_in_context(context, srf_updates[i].surface))) 4206 return true; 4207 } 4208 4209 if (stream_update && 4210 (((stream_update->src.height != 0 && stream_update->src.width != 0) || 4211 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 4212 stream_update->integer_scaling_update) || 4213 stream_update->hdr_static_metadata || 4214 stream_update->abm_level || 4215 stream_update->periodic_interrupt || 4216 stream_update->vrr_infopacket || 4217 stream_update->vsc_infopacket || 4218 stream_update->vsp_infopacket || 4219 stream_update->hfvsif_infopacket || 4220 stream_update->vtem_infopacket || 4221 stream_update->adaptive_sync_infopacket || 4222 stream_update->dpms_off || 4223 stream_update->allow_freesync || 4224 stream_update->vrr_active_variable || 4225 stream_update->vrr_active_fixed || 4226 stream_update->gamut_remap || 4227 stream_update->output_color_space || 4228 stream_update->dither_option || 4229 stream_update->wb_update || 4230 stream_update->dsc_config || 4231 stream_update->mst_bw_update || 4232 stream_update->func_shaper || 4233 stream_update->lut3d_func || 4234 stream_update->pending_test_pattern || 4235 stream_update->crtc_timing_adjust)) 4236 return true; 4237 4238 if (stream) { 4239 stream_status = dc_stream_get_status(stream); 4240 if (stream_status == NULL || stream_status->plane_count != surface_count) 4241 return true; 4242 } 4243 if (dc->idle_optimizations_allowed) 4244 return true; 4245 4246 return false; 4247 } 4248 4249 static bool fast_update_only(struct dc *dc, 4250 struct dc_fast_update *fast_update, 4251 struct dc_surface_update *srf_updates, 4252 int surface_count, 4253 struct dc_stream_update *stream_update, 4254 struct dc_stream_state *stream) 4255 { 4256 return fast_updates_exist(fast_update, surface_count) 4257 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 4258 } 4259 4260 bool dc_update_planes_and_stream(struct dc *dc, 4261 struct dc_surface_update *srf_updates, int surface_count, 4262 struct dc_stream_state *stream, 4263 struct dc_stream_update *stream_update) 4264 { 4265 struct dc_state *context; 4266 enum surface_update_type update_type; 4267 int i; 4268 struct mall_temp_config mall_temp_config; 4269 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4270 4271 /* In cases where MPO and split or ODM are used transitions can 4272 * cause underflow. Apply stream configuration with minimal pipe 4273 * split first to avoid unsupported transitions for active pipes. 4274 */ 4275 bool force_minimal_pipe_splitting = 0; 4276 bool is_plane_addition = 0; 4277 4278 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4279 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 4280 dc, 4281 stream, 4282 surface_count, 4283 &is_plane_addition); 4284 4285 /* on plane addition, minimal state is the current one */ 4286 if (force_minimal_pipe_splitting && is_plane_addition && 4287 !commit_minimal_transition_state(dc, dc->current_state)) 4288 return false; 4289 4290 if (!update_planes_and_stream_state( 4291 dc, 4292 srf_updates, 4293 surface_count, 4294 stream, 4295 stream_update, 4296 &update_type, 4297 &context)) 4298 return false; 4299 4300 /* on plane removal, minimal state is the new one */ 4301 if (force_minimal_pipe_splitting && !is_plane_addition) { 4302 /* Since all phantom pipes are removed in full validation, 4303 * we have to save and restore the subvp/mall config when 4304 * we do a minimal transition since the flags marking the 4305 * pipe as subvp/phantom will be cleared (dc copy constructor 4306 * creates a shallow copy). 4307 */ 4308 if (dc->res_pool->funcs->save_mall_state) 4309 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); 4310 if (!commit_minimal_transition_state(dc, context)) { 4311 dc_release_state(context); 4312 return false; 4313 } 4314 if (dc->res_pool->funcs->restore_mall_state) 4315 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config); 4316 4317 /* If we do a minimal transition with plane removal and the context 4318 * has subvp we also have to retain back the phantom stream / planes 4319 * since the refcount is decremented as part of the min transition 4320 * (we commit a state with no subvp, so the phantom streams / planes 4321 * had to be removed). 4322 */ 4323 if (dc->res_pool->funcs->retain_phantom_pipes) 4324 dc->res_pool->funcs->retain_phantom_pipes(dc, context); 4325 update_type = UPDATE_TYPE_FULL; 4326 } 4327 4328 update_seamless_boot_flags(dc, context, surface_count, stream); 4329 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && 4330 !dc->debug.enable_legacy_fast_update) { 4331 commit_planes_for_stream_fast(dc, 4332 srf_updates, 4333 surface_count, 4334 stream, 4335 stream_update, 4336 update_type, 4337 context); 4338 } else { 4339 if (!stream_update && 4340 dc->hwss.is_pipe_topology_transition_seamless && 4341 !dc->hwss.is_pipe_topology_transition_seamless( 4342 dc, dc->current_state, context)) { 4343 4344 DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); 4345 BREAK_TO_DEBUGGER(); 4346 } 4347 commit_planes_for_stream( 4348 dc, 4349 srf_updates, 4350 surface_count, 4351 stream, 4352 stream_update, 4353 update_type, 4354 context); 4355 } 4356 4357 if (dc->current_state != context) { 4358 4359 /* Since memory free requires elevated IRQL, an interrupt 4360 * request is generated by mem free. If this happens 4361 * between freeing and reassigning the context, our vsync 4362 * interrupt will call into dc and cause a memory 4363 * corruption BSOD. Hence, we first reassign the context, 4364 * then free the old context. 4365 */ 4366 4367 struct dc_state *old = dc->current_state; 4368 4369 dc->current_state = context; 4370 dc_release_state(old); 4371 4372 // clear any forced full updates 4373 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4374 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4375 4376 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4377 pipe_ctx->plane_state->force_full_update = false; 4378 } 4379 } 4380 return true; 4381 } 4382 4383 void dc_commit_updates_for_stream(struct dc *dc, 4384 struct dc_surface_update *srf_updates, 4385 int surface_count, 4386 struct dc_stream_state *stream, 4387 struct dc_stream_update *stream_update, 4388 struct dc_state *state) 4389 { 4390 const struct dc_stream_status *stream_status; 4391 enum surface_update_type update_type; 4392 struct dc_state *context; 4393 struct dc_context *dc_ctx = dc->ctx; 4394 int i, j; 4395 struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 4396 4397 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 4398 stream_status = dc_stream_get_status(stream); 4399 context = dc->current_state; 4400 4401 update_type = dc_check_update_surfaces_for_stream( 4402 dc, srf_updates, surface_count, stream_update, stream_status); 4403 4404 /* TODO: Since change commit sequence can have a huge impact, 4405 * we decided to only enable it for DCN3x. However, as soon as 4406 * we get more confident about this change we'll need to enable 4407 * the new sequence for all ASICs. 4408 */ 4409 if (dc->ctx->dce_version >= DCN_VERSION_3_2) { 4410 /* 4411 * Previous frame finished and HW is ready for optimization. 4412 */ 4413 if (update_type == UPDATE_TYPE_FAST) 4414 dc_post_update_surfaces_to_stream(dc); 4415 4416 dc_update_planes_and_stream(dc, srf_updates, 4417 surface_count, stream, 4418 stream_update); 4419 return; 4420 } 4421 4422 if (update_type >= update_surface_trace_level) 4423 update_surface_trace(dc, srf_updates, surface_count); 4424 4425 4426 if (update_type >= UPDATE_TYPE_FULL) { 4427 4428 /* initialize scratch memory for building context */ 4429 context = dc_create_state(dc); 4430 if (context == NULL) { 4431 DC_ERROR("Failed to allocate new validate context!\n"); 4432 return; 4433 } 4434 4435 dc_resource_state_copy_construct(state, context); 4436 4437 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4438 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 4439 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4440 4441 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 4442 new_pipe->plane_state->force_full_update = true; 4443 } 4444 } else if (update_type == UPDATE_TYPE_FAST) { 4445 /* 4446 * Previous frame finished and HW is ready for optimization. 4447 */ 4448 dc_post_update_surfaces_to_stream(dc); 4449 } 4450 4451 4452 for (i = 0; i < surface_count; i++) { 4453 struct dc_plane_state *surface = srf_updates[i].surface; 4454 4455 copy_surface_update_to_plane(surface, &srf_updates[i]); 4456 4457 if (update_type >= UPDATE_TYPE_MED) { 4458 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4459 struct pipe_ctx *pipe_ctx = 4460 &context->res_ctx.pipe_ctx[j]; 4461 4462 if (pipe_ctx->plane_state != surface) 4463 continue; 4464 4465 resource_build_scaling_params(pipe_ctx); 4466 } 4467 } 4468 } 4469 4470 copy_stream_update_to_stream(dc, context, stream, stream_update); 4471 4472 if (update_type >= UPDATE_TYPE_FULL) { 4473 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 4474 DC_ERROR("Mode validation failed for stream update!\n"); 4475 dc_release_state(context); 4476 return; 4477 } 4478 } 4479 4480 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 4481 4482 update_seamless_boot_flags(dc, context, surface_count, stream); 4483 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && 4484 !dc->debug.enable_legacy_fast_update) { 4485 commit_planes_for_stream_fast(dc, 4486 srf_updates, 4487 surface_count, 4488 stream, 4489 stream_update, 4490 update_type, 4491 context); 4492 } else { 4493 commit_planes_for_stream( 4494 dc, 4495 srf_updates, 4496 surface_count, 4497 stream, 4498 stream_update, 4499 update_type, 4500 context); 4501 } 4502 /*update current_State*/ 4503 if (dc->current_state != context) { 4504 4505 struct dc_state *old = dc->current_state; 4506 4507 dc->current_state = context; 4508 dc_release_state(old); 4509 4510 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4511 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 4512 4513 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 4514 pipe_ctx->plane_state->force_full_update = false; 4515 } 4516 } 4517 4518 /* Legacy optimization path for DCE. */ 4519 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 4520 dc_post_update_surfaces_to_stream(dc); 4521 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 4522 } 4523 4524 return; 4525 4526 } 4527 4528 uint8_t dc_get_current_stream_count(struct dc *dc) 4529 { 4530 return dc->current_state->stream_count; 4531 } 4532 4533 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 4534 { 4535 if (i < dc->current_state->stream_count) 4536 return dc->current_state->streams[i]; 4537 return NULL; 4538 } 4539 4540 enum dc_irq_source dc_interrupt_to_irq_source( 4541 struct dc *dc, 4542 uint32_t src_id, 4543 uint32_t ext_id) 4544 { 4545 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 4546 } 4547 4548 /* 4549 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 4550 */ 4551 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 4552 { 4553 4554 if (dc == NULL) 4555 return false; 4556 4557 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 4558 } 4559 4560 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 4561 { 4562 dal_irq_service_ack(dc->res_pool->irqs, src); 4563 } 4564 4565 void dc_power_down_on_boot(struct dc *dc) 4566 { 4567 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 4568 dc->hwss.power_down_on_boot) 4569 dc->hwss.power_down_on_boot(dc); 4570 } 4571 4572 void dc_set_power_state( 4573 struct dc *dc, 4574 enum dc_acpi_cm_power_state power_state) 4575 { 4576 struct kref refcount; 4577 struct display_mode_lib *dml; 4578 4579 if (!dc->current_state) 4580 return; 4581 4582 switch (power_state) { 4583 case DC_ACPI_CM_POWER_STATE_D0: 4584 dc_resource_state_construct(dc, dc->current_state); 4585 4586 dc_z10_restore(dc); 4587 4588 dc->hwss.init_hw(dc); 4589 4590 if (dc->hwss.init_sys_ctx != NULL && 4591 dc->vm_pa_config.valid) { 4592 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 4593 } 4594 4595 break; 4596 default: 4597 ASSERT(dc->current_state->stream_count == 0); 4598 /* Zero out the current context so that on resume we start with 4599 * clean state, and dc hw programming optimizations will not 4600 * cause any trouble. 4601 */ 4602 dml = kzalloc(sizeof(struct display_mode_lib), 4603 GFP_KERNEL); 4604 4605 ASSERT(dml); 4606 if (!dml) 4607 return; 4608 4609 /* Preserve refcount */ 4610 refcount = dc->current_state->refcount; 4611 /* Preserve display mode lib */ 4612 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); 4613 4614 dc_resource_state_destruct(dc->current_state); 4615 memset(dc->current_state, 0, 4616 sizeof(*dc->current_state)); 4617 4618 dc->current_state->refcount = refcount; 4619 dc->current_state->bw_ctx.dml = *dml; 4620 4621 kfree(dml); 4622 4623 break; 4624 } 4625 } 4626 4627 void dc_resume(struct dc *dc) 4628 { 4629 uint32_t i; 4630 4631 for (i = 0; i < dc->link_count; i++) 4632 dc->link_srv->resume(dc->links[i]); 4633 } 4634 4635 bool dc_is_dmcu_initialized(struct dc *dc) 4636 { 4637 struct dmcu *dmcu = dc->res_pool->dmcu; 4638 4639 if (dmcu) 4640 return dmcu->funcs->is_dmcu_initialized(dmcu); 4641 return false; 4642 } 4643 4644 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) 4645 { 4646 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; 4647 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; 4648 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; 4649 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; 4650 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; 4651 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; 4652 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; 4653 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; 4654 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; 4655 } 4656 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 4657 { 4658 if (dc->hwss.set_clock) 4659 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 4660 return DC_ERROR_UNEXPECTED; 4661 } 4662 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 4663 { 4664 if (dc->hwss.get_clock) 4665 dc->hwss.get_clock(dc, clock_type, clock_cfg); 4666 } 4667 4668 /* enable/disable eDP PSR without specify stream for eDP */ 4669 bool dc_set_psr_allow_active(struct dc *dc, bool enable) 4670 { 4671 int i; 4672 bool allow_active; 4673 4674 for (i = 0; i < dc->current_state->stream_count ; i++) { 4675 struct dc_link *link; 4676 struct dc_stream_state *stream = dc->current_state->streams[i]; 4677 4678 link = stream->link; 4679 if (!link) 4680 continue; 4681 4682 if (link->psr_settings.psr_feature_enabled) { 4683 if (enable && !link->psr_settings.psr_allow_active) { 4684 allow_active = true; 4685 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 4686 return false; 4687 } else if (!enable && link->psr_settings.psr_allow_active) { 4688 allow_active = false; 4689 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 4690 return false; 4691 } 4692 } 4693 } 4694 4695 return true; 4696 } 4697 4698 void dc_allow_idle_optimizations(struct dc *dc, bool allow) 4699 { 4700 if (dc->debug.disable_idle_power_optimizations) 4701 return; 4702 4703 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 4704 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 4705 return; 4706 4707 if (allow == dc->idle_optimizations_allowed) 4708 return; 4709 4710 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) 4711 dc->idle_optimizations_allowed = allow; 4712 } 4713 4714 /* set min and max memory clock to lowest and highest DPM level, respectively */ 4715 void dc_unlock_memory_clock_frequency(struct dc *dc) 4716 { 4717 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4718 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 4719 4720 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4721 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4722 } 4723 4724 /* set min memory clock to the min required for current mode, max to maxDPM */ 4725 void dc_lock_memory_clock_frequency(struct dc *dc) 4726 { 4727 if (dc->clk_mgr->funcs->get_memclk_states_from_smu) 4728 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 4729 4730 if (dc->clk_mgr->funcs->set_hard_min_memclk) 4731 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 4732 4733 if (dc->clk_mgr->funcs->set_hard_max_memclk) 4734 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 4735 } 4736 4737 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 4738 { 4739 struct dc_state *context = dc->current_state; 4740 struct hubp *hubp; 4741 struct pipe_ctx *pipe; 4742 int i; 4743 4744 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4745 pipe = &context->res_ctx.pipe_ctx[i]; 4746 4747 if (pipe->stream != NULL) { 4748 dc->hwss.disable_pixel_data(dc, pipe, true); 4749 4750 // wait for double buffer 4751 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4752 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 4753 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 4754 4755 hubp = pipe->plane_res.hubp; 4756 hubp->funcs->set_blank_regs(hubp, true); 4757 } 4758 } 4759 4760 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 4761 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 4762 4763 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4764 pipe = &context->res_ctx.pipe_ctx[i]; 4765 4766 if (pipe->stream != NULL) { 4767 dc->hwss.disable_pixel_data(dc, pipe, false); 4768 4769 hubp = pipe->plane_res.hubp; 4770 hubp->funcs->set_blank_regs(hubp, false); 4771 } 4772 } 4773 } 4774 4775 4776 /** 4777 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 4778 * @dc: pointer to dc of the dm calling this 4779 * @enable: True = transition to DC mode, false = transition back to AC mode 4780 * 4781 * Some SoCs define additional clock limits when in DC mode, DM should 4782 * invoke this function when the platform undergoes a power source transition 4783 * so DC can apply/unapply the limit. This interface may be disruptive to 4784 * the onscreen content. 4785 * 4786 * Context: Triggered by OS through DM interface, or manually by escape calls. 4787 * Need to hold a dclock when doing so. 4788 * 4789 * Return: none (void function) 4790 * 4791 */ 4792 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 4793 { 4794 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i; 4795 bool p_state_change_support; 4796 4797 if (!dc->config.dc_mode_clk_limit_support) 4798 return; 4799 4800 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 4801 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) { 4802 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM) 4803 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; 4804 } 4805 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 4806 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 4807 4808 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 4809 if (p_state_change_support) { 4810 if (funcMin <= softMax) 4811 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 4812 // else: No-Op 4813 } else { 4814 if (funcMin <= softMax) 4815 blank_and_force_memclk(dc, true, softMax); 4816 // else: No-Op 4817 } 4818 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 4819 if (p_state_change_support) { 4820 if (funcMin <= softMax) 4821 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 4822 // else: No-Op 4823 } else { 4824 if (funcMin <= softMax) 4825 blank_and_force_memclk(dc, true, maxDPM); 4826 // else: No-Op 4827 } 4828 } 4829 dc->clk_mgr->dc_mode_softmax_enabled = enable; 4830 } 4831 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 4832 struct dc_cursor_attributes *cursor_attr) 4833 { 4834 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) 4835 return true; 4836 return false; 4837 } 4838 4839 /* cleanup on driver unload */ 4840 void dc_hardware_release(struct dc *dc) 4841 { 4842 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc); 4843 4844 if (dc->hwss.hardware_release) 4845 dc->hwss.hardware_release(dc); 4846 } 4847 4848 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc) 4849 { 4850 if (dc->current_state) 4851 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true; 4852 } 4853 4854 /** 4855 * dc_is_dmub_outbox_supported - Check if DMUB firmware support outbox notification 4856 * 4857 * @dc: [in] dc structure 4858 * 4859 * Checks whether DMUB FW supports outbox notifications, if supported DM 4860 * should register outbox interrupt prior to actually enabling interrupts 4861 * via dc_enable_dmub_outbox 4862 * 4863 * Return: 4864 * True if DMUB FW supports outbox notifications, False otherwise 4865 */ 4866 bool dc_is_dmub_outbox_supported(struct dc *dc) 4867 { 4868 switch (dc->ctx->asic_id.chip_family) { 4869 4870 case FAMILY_YELLOW_CARP: 4871 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 4872 if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 4873 !dc->debug.dpia_debug.bits.disable_dpia) 4874 return true; 4875 break; 4876 4877 case AMDGPU_FAMILY_GC_11_0_1: 4878 case AMDGPU_FAMILY_GC_11_5_0: 4879 if (!dc->debug.dpia_debug.bits.disable_dpia) 4880 return true; 4881 break; 4882 4883 default: 4884 break; 4885 } 4886 4887 /* dmub aux needs dmub notifications to be enabled */ 4888 return dc->debug.enable_dmub_aux_for_legacy_ddc; 4889 4890 } 4891 4892 /** 4893 * dc_enable_dmub_notifications - Check if dmub fw supports outbox 4894 * 4895 * @dc: [in] dc structure 4896 * 4897 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 4898 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This 4899 * API shall be removed after switching. 4900 * 4901 * Return: 4902 * True if DMUB FW supports outbox notifications, False otherwise 4903 */ 4904 bool dc_enable_dmub_notifications(struct dc *dc) 4905 { 4906 return dc_is_dmub_outbox_supported(dc); 4907 } 4908 4909 /** 4910 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 4911 * 4912 * @dc: [in] dc structure 4913 * 4914 * Enables DMUB unsolicited notifications to x86 via outbox. 4915 */ 4916 void dc_enable_dmub_outbox(struct dc *dc) 4917 { 4918 struct dc_context *dc_ctx = dc->ctx; 4919 4920 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 4921 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__); 4922 } 4923 4924 /** 4925 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 4926 * Sets port index appropriately for legacy DDC 4927 * @dc: dc structure 4928 * @link_index: link index 4929 * @payload: aux payload 4930 * 4931 * Returns: True if successful, False if failure 4932 */ 4933 bool dc_process_dmub_aux_transfer_async(struct dc *dc, 4934 uint32_t link_index, 4935 struct aux_payload *payload) 4936 { 4937 uint8_t action; 4938 union dmub_rb_cmd cmd = {0}; 4939 4940 ASSERT(payload->length <= 16); 4941 4942 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 4943 cmd.dp_aux_access.header.payload_bytes = 0; 4944 /* For dpia, ddc_pin is set to NULL */ 4945 if (!dc->links[link_index]->ddc->ddc_pin) 4946 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 4947 else 4948 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 4949 4950 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 4951 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 4952 cmd.dp_aux_access.aux_control.timeout = 0; 4953 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 4954 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 4955 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 4956 4957 /* set aux action */ 4958 if (payload->i2c_over_aux) { 4959 if (payload->write) { 4960 if (payload->mot) 4961 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 4962 else 4963 action = DP_AUX_REQ_ACTION_I2C_WRITE; 4964 } else { 4965 if (payload->mot) 4966 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 4967 else 4968 action = DP_AUX_REQ_ACTION_I2C_READ; 4969 } 4970 } else { 4971 if (payload->write) 4972 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 4973 else 4974 action = DP_AUX_REQ_ACTION_DPCD_READ; 4975 } 4976 4977 cmd.dp_aux_access.aux_control.dpaux.action = action; 4978 4979 if (payload->length && payload->write) { 4980 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 4981 payload->data, 4982 payload->length 4983 ); 4984 } 4985 4986 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 4987 4988 return true; 4989 } 4990 4991 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 4992 uint8_t dpia_port_index) 4993 { 4994 uint8_t index, link_index = 0xFF; 4995 4996 for (index = 0; index < dc->link_count; index++) { 4997 /* ddc_hw_inst has dpia port index for dpia links 4998 * and ddc instance for legacy links 4999 */ 5000 if (!dc->links[index]->ddc->ddc_pin) { 5001 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 5002 link_index = index; 5003 break; 5004 } 5005 } 5006 } 5007 ASSERT(link_index != 0xFF); 5008 return link_index; 5009 } 5010 5011 /** 5012 * dc_process_dmub_set_config_async - Submits set_config command 5013 * 5014 * @dc: [in] dc structure 5015 * @link_index: [in] link_index: link index 5016 * @payload: [in] aux payload 5017 * @notify: [out] set_config immediate reply 5018 * 5019 * Submits set_config command to dmub via inbox message. 5020 * 5021 * Return: 5022 * True if successful, False if failure 5023 */ 5024 bool dc_process_dmub_set_config_async(struct dc *dc, 5025 uint32_t link_index, 5026 struct set_config_cmd_payload *payload, 5027 struct dmub_notification *notify) 5028 { 5029 union dmub_rb_cmd cmd = {0}; 5030 bool is_cmd_complete = true; 5031 5032 /* prepare SET_CONFIG command */ 5033 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 5034 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 5035 5036 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 5037 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 5038 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 5039 5040 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 5041 /* command is not processed by dmub */ 5042 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 5043 return is_cmd_complete; 5044 } 5045 5046 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 5047 if (cmd.set_config_access.header.ret_status == 1) 5048 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 5049 else 5050 /* cmd pending, will receive notification via outbox */ 5051 is_cmd_complete = false; 5052 5053 return is_cmd_complete; 5054 } 5055 5056 /** 5057 * dc_process_dmub_set_mst_slots - Submits MST solt allocation 5058 * 5059 * @dc: [in] dc structure 5060 * @link_index: [in] link index 5061 * @mst_alloc_slots: [in] mst slots to be allotted 5062 * @mst_slots_in_use: [out] mst slots in use returned in failure case 5063 * 5064 * Submits mst slot allocation command to dmub via inbox message 5065 * 5066 * Return: 5067 * DC_OK if successful, DC_ERROR if failure 5068 */ 5069 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 5070 uint32_t link_index, 5071 uint8_t mst_alloc_slots, 5072 uint8_t *mst_slots_in_use) 5073 { 5074 union dmub_rb_cmd cmd = {0}; 5075 5076 /* prepare MST_ALLOC_SLOTS command */ 5077 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 5078 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 5079 5080 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 5081 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 5082 5083 if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 5084 /* command is not processed by dmub */ 5085 return DC_ERROR_UNEXPECTED; 5086 5087 /* command processed by dmub, if ret_status is 1 */ 5088 if (cmd.set_config_access.header.ret_status != 1) 5089 /* command processing error */ 5090 return DC_ERROR_UNEXPECTED; 5091 5092 /* command processed and we have a status of 2, mst not enabled in dpia */ 5093 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 5094 return DC_FAIL_UNSUPPORTED_1; 5095 5096 /* previously configured mst alloc and used slots did not match */ 5097 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 5098 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 5099 return DC_NOT_SUPPORTED; 5100 } 5101 5102 return DC_OK; 5103 } 5104 5105 /** 5106 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption 5107 * 5108 * @dc: [in] dc structure 5109 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable 5110 * 5111 * Submits dpia hpd int enable command to dmub via inbox message 5112 */ 5113 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc, 5114 uint32_t hpd_int_enable) 5115 { 5116 union dmub_rb_cmd cmd = {0}; 5117 5118 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 5119 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 5120 5121 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 5122 5123 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 5124 } 5125 5126 /** 5127 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging 5128 * 5129 * @dc: [in] dc structure 5130 * 5131 * 5132 */ 5133 void dc_print_dmub_diagnostic_data(const struct dc *dc) 5134 { 5135 dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv); 5136 } 5137 5138 /** 5139 * dc_disable_accelerated_mode - disable accelerated mode 5140 * @dc: dc structure 5141 */ 5142 void dc_disable_accelerated_mode(struct dc *dc) 5143 { 5144 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 5145 } 5146 5147 5148 /** 5149 * dc_notify_vsync_int_state - notifies vsync enable/disable state 5150 * @dc: dc structure 5151 * @stream: stream where vsync int state changed 5152 * @enable: whether vsync is enabled or disabled 5153 * 5154 * Called when vsync is enabled/disabled Will notify DMUB to start/stop ABM 5155 * interrupts after steady state is reached. 5156 */ 5157 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 5158 { 5159 int i; 5160 int edp_num; 5161 struct pipe_ctx *pipe = NULL; 5162 struct dc_link *link = stream->sink->link; 5163 struct dc_link *edp_links[MAX_NUM_EDP]; 5164 5165 5166 if (link->psr_settings.psr_feature_enabled) 5167 return; 5168 5169 if (link->replay_settings.replay_feature_enabled) 5170 return; 5171 5172 /*find primary pipe associated with stream*/ 5173 for (i = 0; i < MAX_PIPES; i++) { 5174 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5175 5176 if (pipe->stream == stream && pipe->stream_res.tg) 5177 break; 5178 } 5179 5180 if (i == MAX_PIPES) { 5181 ASSERT(0); 5182 return; 5183 } 5184 5185 dc_get_edp_links(dc, edp_links, &edp_num); 5186 5187 /* Determine panel inst */ 5188 for (i = 0; i < edp_num; i++) { 5189 if (edp_links[i] == link) 5190 break; 5191 } 5192 5193 if (i == edp_num) { 5194 return; 5195 } 5196 5197 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 5198 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 5199 } 5200 5201 /***************************************************************************** 5202 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause 5203 * ABM 5204 * @dc: dc structure 5205 * @stream: stream where vsync int state changed 5206 * @pData: abm hw states 5207 * 5208 ****************************************************************************/ 5209 bool dc_abm_save_restore( 5210 struct dc *dc, 5211 struct dc_stream_state *stream, 5212 struct abm_save_restore *pData) 5213 { 5214 int i; 5215 int edp_num; 5216 struct pipe_ctx *pipe = NULL; 5217 struct dc_link *link = stream->sink->link; 5218 struct dc_link *edp_links[MAX_NUM_EDP]; 5219 5220 5221 /*find primary pipe associated with stream*/ 5222 for (i = 0; i < MAX_PIPES; i++) { 5223 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 5224 5225 if (pipe->stream == stream && pipe->stream_res.tg) 5226 break; 5227 } 5228 5229 if (i == MAX_PIPES) { 5230 ASSERT(0); 5231 return false; 5232 } 5233 5234 dc_get_edp_links(dc, edp_links, &edp_num); 5235 5236 /* Determine panel inst */ 5237 for (i = 0; i < edp_num; i++) 5238 if (edp_links[i] == link) 5239 break; 5240 5241 if (i == edp_num) 5242 return false; 5243 5244 if (pipe->stream_res.abm && 5245 pipe->stream_res.abm->funcs->save_restore) 5246 return pipe->stream_res.abm->funcs->save_restore( 5247 pipe->stream_res.abm, 5248 i, 5249 pData); 5250 return false; 5251 } 5252 5253 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) 5254 { 5255 unsigned int i; 5256 bool subvp_in_use = false; 5257 5258 for (i = 0; i < dc->current_state->stream_count; i++) { 5259 if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) { 5260 subvp_in_use = true; 5261 break; 5262 } 5263 } 5264 properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size; 5265 } 5266 5267 /** 5268 ***************************************************************************** 5269 * dc_set_edp_power() - DM controls eDP power to be ON/OFF 5270 * 5271 * Called when DM wants to power on/off eDP. 5272 * Only work on links with flag skip_implict_edp_power_control is set. 5273 * 5274 ***************************************************************************** 5275 */ 5276 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 5277 bool powerOn) 5278 { 5279 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 5280 return; 5281 5282 if (edp_link->skip_implict_edp_power_control == false) 5283 return; 5284 5285 edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); 5286 } 5287 5288