/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "dce/dmub_psr.h"

#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "hw_sequencer_private.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct. One per driver. Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints). Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display. Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
 * (the display directly attached). It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

/* Private functions */

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			dc->link_srv->destroy_link(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}
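
/* Build the dc->links table: one dc_link per physical connector reported by
 * the BIOS object table, one per USB4 DPIA port, and one per requested
 * virtual link (virtual links get a stub virtual encoder instead of a DIG).
 */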
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = dc->link_srv->create_link(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = dc->link_srv->create_link(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia;
	unsigned int num_dig_link_enc;
	int i;

	if (!dc->res_pool)
		return;

	num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
 * Rate, which is a power-saving feature that targets reducing panel
 * refresh rate while the screen is static.
 *
 * Return: %true if the pipe context is found and adjusted;
 * %false if the pipe context is not found.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;

	/*
	 * Don't adjust DRR while there are bandwidth optimizations pending to
	 * avoid conflicting with firmware updates.
	 */
	if (dc->ctx->dce_version > DCE_VERSION_MAX)
		if (dc->optimized_required || dc->wm_optimized_required)
			return false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			return true;
		}
	}
	return false;
}
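
/* DRR background, illustrative numbers only: the refresh rate of a timing is
 *   refresh_hz = pix_clk / (h_total * v_total),
 * so raising v_total above its nominal value lowers the refresh rate. For
 * example, at a 148.5 MHz pixel clock with h_total = 2200, v_total = 1125
 * gives 60 Hz, while stretching v_total to 1350 gives 50 Hz. The
 * v_total_min/v_total_max pair adjusted above bounds that stretch.
 */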

/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: [in] dc reference
 * @stream: [in] Initial dc stream state
 * @refresh_rate: [out] the last VTOTAL used by DRR
 *
 * Return: %true if the pipe context is found and there is an associated
 * timing_generator for the DC;
 * %false if the pipe context is not found or there is no
 * timing_generator for the DC.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}
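
/* Secure display support: forward a CRC region of interest (ROI) to the
 * display firmware. Newer ASICs take this path through the DMUB; older ones
 * use the DMCU. dc_stream_forward_crc_window() below picks whichever of the
 * two is available.
 */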
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static inline void
dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	union dmub_rb_cmd cmd = {0};

	cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
	cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;

	if (is_stop) {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
	} else {
		cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
		cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
		cmd.secure_display.roi_info.x_start = rect->x;
		cmd.secure_display.roi_info.y_start = rect->y;
		cmd.secure_display.roi_info.x_end = rect->x + rect->width;
		cmd.secure_display.roi_info.y_end = rect->y + rect->height;
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
}

static inline void
dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
		struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
{
	if (is_stop)
		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	else
		dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
}

bool
dc_stream_forward_crc_window(struct dc_stream_state *stream,
		struct rect *rect, bool is_stop)
{
	struct dmcu *dmcu;
	struct dc_dmub_srv *dmub_srv;
	struct otg_phy_mux mux_mapping;
	struct pipe_ctx *pipe;
	int i;
	struct dc *dc = stream->ctx->dc;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}

	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
	mux_mapping.otg_output_num = pipe->stream_res.tg->inst;

	dmcu = dc->res_pool->dmcu;
	dmub_srv = dc->ctx->dmub_srv;

	/* forward to dmub */
	if (dmub_srv)
		dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
	/* forward to dmcu */
	else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
	else
		return false;

	return true;
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 *
 * Return: %false if the stream is not found or CRC capture is not supported;
 *         %true if the stream has been configured.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 *
 * @dc: DC object.
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the red component.
 * @g_y: CRC value for the green component.
 * @b_cb: CRC value for the blue component.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 *
 * Return:
 * %false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}
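
/* Tear down everything dc_construct() and create_links() set up, in roughly
 * reverse order of creation.
 */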
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->link_srv)
		link_destroy_link_service(&dc->link_srv);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;
	dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		kfree(dc_ctx);
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	dc->link_srv = link_create_link_service();
	if (!dc->link_srv)
		return false;

	return true;
}
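
/* Full construction path for real hardware: allocates the bandwidth
 * calculation structs, the BIOS parser, the GPIO service, the resource pool,
 * the clock manager and the initial dc_state, then creates the links.
 */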
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx_resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_FP
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
		struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void phantom_pipe_blank(
		struct dc *dc,
		struct timing_generator *tg,
		int width,
		int height)
{
	struct dce_hwseq *hws = dc->hwseq;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct output_pixel_processor *opp = NULL;
	uint32_t num_opps, opp_id_src0, opp_id_src1;
	uint32_t otg_active_width, otg_active_height;
	uint32_t i;

	/* program opp dpg blank color */
	color_space = COLOR_SPACE_SRGB;
	color_space_to_black_color(dc, color_space, &black_color);

	otg_active_width = width;
	otg_active_height = height;

	/* get the OPTC source */
	tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
	ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);

	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
			opp = dc->res_pool->opps[i];
			break;
		}
	}

	if (opp && opp->funcs->opp_set_disp_pattern_generator)
		opp->funcs->opp_set_disp_pattern_generator(
				opp,
				CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
				CONTROLLER_DP_COLOR_SPACE_UDEFINED,
				COLOR_DEPTH_UNDEFINED,
				&black_color,
				otg_active_width,
				otg_active_height,
				0);

	if (tg->funcs->is_tg_enabled(tg))
		hws->funcs.wait_for_blank_complete(opp);
}
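
/* Release planes and streams that exist in the current state but are no
 * longer referenced by the new context ("dangling"), by committing a
 * temporary copy of the current state with those planes removed.
 */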
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
				!dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
			struct pipe_ctx *old_pipe, *new_pipe;

			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			new_pipe = &context->res_ctx.pipe_ctx[i];

			if (old_pipe->plane_state && !new_pipe->plane_state)
				should_disable = true;
		}

		if (should_disable && old_stream) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			/* When disabling plane for a phantom pipe, we must turn on the
			 * phantom OTG so the disable programming gets the double buffer
			 * update. Otherwise the pipe will be left in a partially disabled
			 * state that can result in underflow or hang when enabling it
			 * again for different use.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->enable_crtc) {
					int main_pipe_width, main_pipe_height;

					main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
					main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
					phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
					tg->funcs->enable_crtc(tg);
				}
			}
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* We need to put the phantom OTG back into its default (disabled) state or we
			 * can get corruption when transitioning from one SubVP config to a different one.
			 * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
			 * will still get its double buffer update.
			 */
			if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
				if (tg->funcs->disable_phantom_crtc)
					tg->funcs->disable_phantom_crtc(tg);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
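
/* If VBIOS/GOP left an eDP stream lit up with a pixel clock that does not
 * match the mode we are about to set, turn the stream off first so it can be
 * re-enabled with the correct timing.
 */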
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing changed, disable stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						dc->link_srv->set_dpms_off(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/* Public functions */
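
/* Allocate and construct a dc instance. For virtual-HW environments only the
 * context is constructed; for real hardware the full construction path runs
 * and the derived caps (max_streams, max_links, etc.) are filled in.
 */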
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
	dc->nbio_reg_offsets = init_params->nbio_reg_offsets;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_connection_type(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
	dc->ctx->cp_psp = init_params->cp_psp;
}

void dc_deinit_callbacks(struct dc *dc)
{
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}
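
/* Group pipes whose streams have identical (or vblank-compatible) timings and
 * synchronize their OTGs, with the first unblanked pipe in each group acting
 * as the master.
 */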
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream
				|| ctx->res_ctx.pipe_ctx[i].top_pipe
				|| ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
						unsynced_pipes[j]->stream,
						pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else
			if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
			}
			group_index++;
		}
		num_group++;
	}
}
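
/* Check whether the requested set of streams differs from what is currently
 * committed (different count, different stream pointers, or a link whose
 * state is no longer valid).
 */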
static bool streams_changed(struct dc *dc,
			    struct dc_stream_state *streams[],
			    uint8_t stream_count)
{
	uint8_t i;

	if (stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != streams[i])
			return true;
		if (!streams[i]->link->link_state_valid)
			return true;
	}

	return false;
}
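
/* Decide whether the timing the VBIOS/GOP already programmed matches the
 * requested crtc_timing closely enough that the mode set can be skipped for
 * a seamless boot (eDP only).
 */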
bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	if (tg_inst != link->link_enc->preferred_engine)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
					&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}

static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}

/**
 * dc_commit_state_no_check - Apply context to the hardware
 *
 * @dc: DC object with the current status to be updated
 * @context: New state that will become the current status at the end of this function
 *
 * Applies given context to the hardware and copies it into current context.
 * It's up to the user to release the src context afterwards.
 *
 * Return: an enum dc_status result code for the operation
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;
	bool subvp_prev_use = false;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* Check old context for SubVP */
		subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
		if (subvp_prev_use)
			break;
	}

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	/* When SubVP is active, all HW programming must be done while
	 * SubVP lock is acquired
	 */
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, false);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK) {
		/* Application of dc_state to hardware stopped. */
		dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
		return result;
	}

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}

	if (dc->hwss.commit_subvp_config)
		dc->hwss.commit_subvp_config(dc, context);
	if (dc->hwss.subvp_pipe_control_lock)
		dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->debug.enable_double_buffered_dsc_pg_support)
		dc->hwss.update_dsc_pg(dc, context, true);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}
1992 * 1993 * Return: 1994 * DC_OK if everything works as expected; otherwise, a dc_status error 1995 * code. 1996 */ 1997 enum dc_status dc_commit_streams(struct dc *dc, 1998 struct dc_stream_state *streams[], 1999 uint8_t stream_count) 2000 { 2001 int i, j; 2002 struct dc_state *context; 2003 enum dc_status res = DC_OK; 2004 struct dc_validation_set set[MAX_STREAMS] = {0}; 2005 2006 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) 2007 return res; 2008 2009 if (!streams_changed(dc, streams, stream_count)) 2010 return res; 2011 2012 DC_LOG_DC("%s: %d streams\n", __func__, stream_count); 2013 2014 for (i = 0; i < stream_count; i++) { 2015 struct dc_stream_state *stream = streams[i]; 2016 struct dc_stream_status *status = dc_stream_get_status(stream); 2017 2018 dc_stream_log(dc, stream); 2019 2020 set[i].stream = stream; 2021 2022 if (status) { 2023 set[i].plane_count = status->plane_count; 2024 for (j = 0; j < status->plane_count; j++) 2025 set[i].plane_states[j] = status->plane_states[j]; 2026 } 2027 } 2028 2029 context = dc_create_state(dc); 2030 if (!context) 2031 goto context_alloc_fail; 2032 2033 dc_resource_state_copy_construct_current(dc, context); 2034 2035 res = dc_validate_with_context(dc, set, stream_count, context, false); 2036 if (res != DC_OK) { 2037 BREAK_TO_DEBUGGER(); 2038 goto fail; 2039 } 2040 2041 res = dc_commit_state_no_check(dc, context); 2042 2043 for (i = 0; i < stream_count; i++) { 2044 for (j = 0; j < context->stream_count; j++) { 2045 if (streams[i]->stream_id == context->streams[j]->stream_id) 2046 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2047 2048 if (dc_is_embedded_signal(streams[i]->signal)) { 2049 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 2050 2051 if (dc->hwss.is_abm_supported) 2052 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); 2053 else 2054 status->is_abm_supported = true; 2055 } 2056 } 2057 } 2058 2059 fail: 2060 dc_release_state(context); 2061 2062 context_alloc_fail: 2063 2064 DC_LOG_DC("%s Finished.\n", __func__); 2065 2066 return res; 2067 } 2068 2069 bool dc_acquire_release_mpc_3dlut( 2070 struct dc *dc, bool acquire, 2071 struct dc_stream_state *stream, 2072 struct dc_3dlut **lut, 2073 struct dc_transfer_func **shaper) 2074 { 2075 int pipe_idx; 2076 bool ret = false; 2077 bool found_pipe_idx = false; 2078 const struct resource_pool *pool = dc->res_pool; 2079 struct resource_context *res_ctx = &dc->current_state->res_ctx; 2080 int mpcc_id = 0; 2081 2082 if (pool && res_ctx) { 2083 if (acquire) { 2084 /* find pipe idx for the given stream */ 2085 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 2086 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 2087 found_pipe_idx = true; 2088 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 2089 break; 2090 } 2091 } 2092 } else 2093 found_pipe_idx = true; /* for release, pipe_idx is not required */ 2094 2095 if (found_pipe_idx) { 2096 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 2097 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 2098 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 2099 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 2100 } 2101 } 2102 return ret; 2103 } 2104 2105 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 2106 { 2107 int i; 2108 struct pipe_ctx *pipe; 2109 2110 for (i = 0; i < MAX_PIPES; i++) { 2111 pipe = &context->res_ctx.pipe_ctx[i]; 2112
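/* Scan every pipe slot in the context; a slot without a plane_state has
 * nothing to flip and is skipped below along with SubVP phantom pipes.
 */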
2113 // Don't check flip pending on phantom pipes 2114 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2115 continue; 2116 2117 /* Must set to false to start with, due to OR in update function */ 2118 pipe->plane_state->status.is_flip_pending = false; 2119 dc->hwss.update_pending_status(pipe); 2120 if (pipe->plane_state->status.is_flip_pending) 2121 return true; 2122 } 2123 return false; 2124 } 2125 2126 /* Perform updates here which need to be deferred until next vupdate 2127 * 2128 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 2129 * but forcing lut memory to shutdown state is immediate. This causes 2130 * single frame corruption as lut gets disabled mid-frame unless shutdown 2131 * is deferred until after entering bypass. 2132 */ 2133 static void process_deferred_updates(struct dc *dc) 2134 { 2135 int i = 0; 2136 2137 if (dc->debug.enable_mem_low_power.bits.cm) { 2138 ASSERT(dc->dcn_ip->max_num_dpp); 2139 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 2140 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 2141 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 2142 } 2143 } 2144 2145 void dc_post_update_surfaces_to_stream(struct dc *dc) 2146 { 2147 int i; 2148 struct dc_state *context = dc->current_state; 2149 2150 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 2151 return; 2152 2153 post_surface_trace(dc); 2154 2155 /* 2156 * Only relevant for DCN behavior where we can guarantee the optimization 2157 * is safe to apply - retain the legacy behavior for DCE. 2158 */ 2159 2160 if (dc->ctx->dce_version < DCE_VERSION_MAX) 2161 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 2162 else { 2163 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 2164 2165 if (is_flip_pending_in_pipes(dc, context)) 2166 return; 2167 2168 for (i = 0; i < dc->res_pool->pipe_count; i++) 2169 if (context->res_ctx.pipe_ctx[i].stream == NULL || 2170 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 2171 context->res_ctx.pipe_ctx[i].pipe_idx = i; 2172 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 2173 } 2174 2175 process_deferred_updates(dc); 2176 2177 dc->hwss.optimize_bandwidth(dc, context); 2178 2179 if (dc->debug.enable_double_buffered_dsc_pg_support) 2180 dc->hwss.update_dsc_pg(dc, context, true); 2181 } 2182 2183 dc->optimized_required = false; 2184 dc->wm_optimized_required = false; 2185 } 2186 2187 static void init_state(struct dc *dc, struct dc_state *context) 2188 { 2189 /* Each context must have their own instance of VBA and in order to 2190 * initialize and obtain IP and SOC the base DML instance from DC is 2191 * initially copied into every context 2192 */ 2193 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2194 } 2195 2196 struct dc_state *dc_create_state(struct dc *dc) 2197 { 2198 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2199 GFP_KERNEL); 2200 2201 if (!context) 2202 return NULL; 2203 2204 init_state(dc, context); 2205 2206 kref_init(&context->refcount); 2207 2208 return context; 2209 } 2210 2211 struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2212 { 2213 int i, j; 2214 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2215 2216 if (!new_ctx) 2217 return NULL; 2218 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2219 2220 for (i = 0; i < MAX_PIPES; i++) { 2221 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2222 2223 if (cur_pipe->top_pipe) 2224 cur_pipe->top_pipe = 
&new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2225 2226 if (cur_pipe->bottom_pipe) 2227 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2228 2229 if (cur_pipe->prev_odm_pipe) 2230 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2231 2232 if (cur_pipe->next_odm_pipe) 2233 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2234 2235 } 2236 2237 for (i = 0; i < new_ctx->stream_count; i++) { 2238 dc_stream_retain(new_ctx->streams[i]); 2239 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2240 dc_plane_state_retain( 2241 new_ctx->stream_status[i].plane_states[j]); 2242 } 2243 2244 kref_init(&new_ctx->refcount); 2245 2246 return new_ctx; 2247 } 2248 2249 void dc_retain_state(struct dc_state *context) 2250 { 2251 kref_get(&context->refcount); 2252 } 2253 2254 static void dc_state_free(struct kref *kref) 2255 { 2256 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2257 dc_resource_state_destruct(context); 2258 kvfree(context); 2259 } 2260 2261 void dc_release_state(struct dc_state *context) 2262 { 2263 kref_put(&context->refcount, dc_state_free); 2264 } 2265 2266 bool dc_set_generic_gpio_for_stereo(bool enable, 2267 struct gpio_service *gpio_service) 2268 { 2269 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2270 struct gpio_pin_info pin_info; 2271 struct gpio *generic; 2272 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2273 GFP_KERNEL); 2274 2275 if (!config) 2276 return false; 2277 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2278 2279 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2280 kfree(config); 2281 return false; 2282 } else { 2283 generic = dal_gpio_service_create_generic_mux( 2284 gpio_service, 2285 pin_info.offset, 2286 pin_info.mask); 2287 } 2288 2289 if (!generic) { 2290 kfree(config); 2291 return false; 2292 } 2293 2294 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2295 2296 config->enable_output_from_mux = enable; 2297 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2298 2299 if (gpio_result == GPIO_RESULT_OK) 2300 gpio_result = dal_mux_setup_config(generic, config); 2301 2302 if (gpio_result == GPIO_RESULT_OK) { 2303 dal_gpio_close(generic); 2304 dal_gpio_destroy_generic_mux(&generic); 2305 kfree(config); 2306 return true; 2307 } else { 2308 dal_gpio_close(generic); 2309 dal_gpio_destroy_generic_mux(&generic); 2310 kfree(config); 2311 return false; 2312 } 2313 } 2314 2315 static bool is_surface_in_context( 2316 const struct dc_state *context, 2317 const struct dc_plane_state *plane_state) 2318 { 2319 int j; 2320 2321 for (j = 0; j < MAX_PIPES; j++) { 2322 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2323 2324 if (plane_state == pipe_ctx->plane_state) { 2325 return true; 2326 } 2327 } 2328 2329 return false; 2330 } 2331 2332 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2333 { 2334 union surface_update_flags *update_flags = &u->surface->update_flags; 2335 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2336 2337 if (!u->plane_info) 2338 return UPDATE_TYPE_FAST; 2339 2340 if (u->plane_info->color_space != u->surface->color_space) { 2341 update_flags->bits.color_space_change = 1; 2342 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2343 } 2344 2345 if (u->plane_info->horizontal_mirror != 
u->surface->horizontal_mirror) { 2346 update_flags->bits.horizontal_mirror_change = 1; 2347 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2348 } 2349 2350 if (u->plane_info->rotation != u->surface->rotation) { 2351 update_flags->bits.rotation_change = 1; 2352 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2353 } 2354 2355 if (u->plane_info->format != u->surface->format) { 2356 update_flags->bits.pixel_format_change = 1; 2357 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2358 } 2359 2360 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2361 update_flags->bits.stereo_format_change = 1; 2362 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2363 } 2364 2365 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2366 update_flags->bits.per_pixel_alpha_change = 1; 2367 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2368 } 2369 2370 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2371 update_flags->bits.global_alpha_change = 1; 2372 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2373 } 2374 2375 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2376 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2377 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2378 /* During DCC on/off, stutter period is calculated before 2379 * DCC has fully transitioned. This results in incorrect 2380 * stutter period calculation. Triggering a full update will 2381 * recalculate stutter period. 2382 */ 2383 update_flags->bits.dcc_change = 1; 2384 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2385 } 2386 2387 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2388 resource_pixel_format_to_bpp(u->surface->format)) { 2389 /* different bytes per element will require full bandwidth 2390 * and DML calculation 2391 */ 2392 update_flags->bits.bpp_change = 1; 2393 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2394 } 2395 2396 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2397 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2398 update_flags->bits.plane_size_change = 1; 2399 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2400 } 2401 2402 2403 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2404 sizeof(union dc_tiling_info)) != 0) { 2405 update_flags->bits.swizzle_change = 1; 2406 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2407 2408 /* todo: below are HW dependent, we should add a hook to 2409 * DCE/N resource and validate there. 2410 */ 2411 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2412 /* swizzled mode requires RQ to be set up properly, 2413 * thus need to run DML to calculate RQ settings 2414 */ 2415 update_flags->bits.bandwidth_change = 1; 2416 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2417 } 2418 } 2419 2420 /* This should be UPDATE_TYPE_FAST if nothing has changed.
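Only the plane_info-driven flags are evaluated above; scaling changes are classified separately by get_scaling_info_update_type().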
*/ 2421 return update_type; 2422 } 2423 2424 static enum surface_update_type get_scaling_info_update_type( 2425 const struct dc_surface_update *u) 2426 { 2427 union surface_update_flags *update_flags = &u->surface->update_flags; 2428 2429 if (!u->scaling_info) 2430 return UPDATE_TYPE_FAST; 2431 2432 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2433 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2434 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2435 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2436 || u->scaling_info->scaling_quality.integer_scaling != 2437 u->surface->scaling_quality.integer_scaling 2438 ) { 2439 update_flags->bits.scaling_change = 1; 2440 2441 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2442 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2443 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2444 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2445 /* Making dst rect smaller requires a bandwidth change */ 2446 update_flags->bits.bandwidth_change = 1; 2447 } 2448 2449 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2450 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2451 2452 update_flags->bits.scaling_change = 1; 2453 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2454 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2455 /* Making src rect bigger requires a bandwidth change */ 2456 update_flags->bits.clock_change = 1; 2457 } 2458 2459 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2460 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2461 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2462 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2463 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2464 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2465 update_flags->bits.position_change = 1; 2466 2467 if (update_flags->bits.clock_change 2468 || update_flags->bits.bandwidth_change 2469 || update_flags->bits.scaling_change) 2470 return UPDATE_TYPE_FULL; 2471 2472 if (update_flags->bits.position_change) 2473 return UPDATE_TYPE_MED; 2474 2475 return UPDATE_TYPE_FAST; 2476 } 2477 2478 static enum surface_update_type det_surface_update(const struct dc *dc, 2479 const struct dc_surface_update *u) 2480 { 2481 const struct dc_state *context = dc->current_state; 2482 enum surface_update_type type; 2483 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2484 union surface_update_flags *update_flags = &u->surface->update_flags; 2485 2486 if (u->flip_addr) 2487 update_flags->bits.addr_update = 1; 2488 2489 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2490 update_flags->raw = 0xFFFFFFFF; 2491 return UPDATE_TYPE_FULL; 2492 } 2493 2494 update_flags->raw = 0; // Reset all flags 2495 2496 type = get_plane_info_update_type(u); 2497 elevate_update_type(&overall_type, type); 2498 2499 type = get_scaling_info_update_type(u); 2500 elevate_update_type(&overall_type, type); 2501 2502 if (u->flip_addr) { 2503 update_flags->bits.addr_update = 1; 2504 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2505 update_flags->bits.tmz_changed = 1; 2506 elevate_update_type(&overall_type, UPDATE_TYPE_FULL); 2507 } 2508 } 2509 if (u->in_transfer_func) 2510 update_flags->bits.in_transfer_func_change = 1; 2511 2512 if (u->input_csc_color_matrix) 2513 
update_flags->bits.input_csc_change = 1; 2514 2515 if (u->coeff_reduction_factor) 2516 update_flags->bits.coeff_reduction_change = 1; 2517 2518 if (u->gamut_remap_matrix) 2519 update_flags->bits.gamut_remap_change = 1; 2520 2521 if (u->gamma) { 2522 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2523 2524 if (u->plane_info) 2525 format = u->plane_info->format; 2526 else if (u->surface) 2527 format = u->surface->format; 2528 2529 if (dce_use_lut(format)) 2530 update_flags->bits.gamma_change = 1; 2531 } 2532 2533 if (u->lut3d_func || u->func_shaper) 2534 update_flags->bits.lut_3d = 1; 2535 2536 if (u->hdr_mult.value) 2537 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2538 update_flags->bits.hdr_mult = 1; 2539 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2540 } 2541 2542 if (update_flags->bits.in_transfer_func_change) { 2543 type = UPDATE_TYPE_MED; 2544 elevate_update_type(&overall_type, type); 2545 } 2546 2547 if (update_flags->bits.input_csc_change 2548 || update_flags->bits.coeff_reduction_change 2549 || update_flags->bits.lut_3d 2550 || update_flags->bits.gamma_change 2551 || update_flags->bits.gamut_remap_change) { 2552 type = UPDATE_TYPE_FULL; 2553 elevate_update_type(&overall_type, type); 2554 } 2555 2556 return overall_type; 2557 } 2558 2559 static enum surface_update_type check_update_surfaces_for_stream( 2560 struct dc *dc, 2561 struct dc_surface_update *updates, 2562 int surface_count, 2563 struct dc_stream_update *stream_update, 2564 const struct dc_stream_status *stream_status) 2565 { 2566 int i; 2567 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2568 2569 if (dc->idle_optimizations_allowed) 2570 overall_type = UPDATE_TYPE_FULL; 2571 2572 if (stream_status == NULL || stream_status->plane_count != surface_count) 2573 overall_type = UPDATE_TYPE_FULL; 2574 2575 if (stream_update && stream_update->pending_test_pattern) { 2576 overall_type = UPDATE_TYPE_FULL; 2577 } 2578 2579 /* some stream updates require passive update */ 2580 if (stream_update) { 2581 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2582 2583 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2584 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2585 stream_update->integer_scaling_update) 2586 su_flags->bits.scaling = 1; 2587 2588 if (stream_update->out_transfer_func) 2589 su_flags->bits.out_tf = 1; 2590 2591 if (stream_update->abm_level) 2592 su_flags->bits.abm_level = 1; 2593 2594 if (stream_update->dpms_off) 2595 su_flags->bits.dpms_off = 1; 2596 2597 if (stream_update->gamut_remap) 2598 su_flags->bits.gamut_remap = 1; 2599 2600 if (stream_update->wb_update) 2601 su_flags->bits.wb_update = 1; 2602 2603 if (stream_update->dsc_config) 2604 su_flags->bits.dsc_changed = 1; 2605 2606 if (stream_update->mst_bw_update) 2607 su_flags->bits.mst_bw = 1; 2608 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) 2609 su_flags->bits.crtc_timing_adjust = 1; 2610 2611 if (su_flags->raw != 0) 2612 overall_type = UPDATE_TYPE_FULL; 2613 2614 if (stream_update->output_csc_transform || stream_update->output_color_space) 2615 su_flags->bits.out_csc = 1; 2616 } 2617 2618 for (i = 0 ; i < surface_count; i++) { 2619 enum surface_update_type type = 2620 det_surface_update(dc, &updates[i]); 2621 2622 elevate_update_type(&overall_type, type); 2623 } 2624 2625 return overall_type; 2626 } 2627 2628 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect) 2629 { 2630 int 
view_height, view_width, clip_x, clip_y, clip_width, clip_height; 2631 2632 view_height = src.height; 2633 view_width = src.width; 2634 2635 clip_x = clip_rect.x; 2636 clip_y = clip_rect.y; 2637 2638 clip_width = clip_rect.width; 2639 clip_height = clip_rect.height; 2640 2641 /* check for centered video accounting for off by 1 scaling truncation */ 2642 if ((view_height - clip_y - clip_height <= clip_y + 1) && 2643 (view_width - clip_x - clip_width <= clip_x + 1) && 2644 (view_height - clip_y - clip_height >= clip_y - 1) && 2645 (view_width - clip_x - clip_width >= clip_x - 1)) { 2646 2647 /* when OS scales up/down to letter box, it may end up 2648 * with few blank pixels on the border due to truncating. 2649 * Add offset margin to account for this 2650 */ 2651 if (clip_x <= 4 || clip_y <= 4) 2652 return true; 2653 } 2654 2655 return false; 2656 } 2657 2658 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc, 2659 struct dc_surface_update *srf_updates, int surface_count, 2660 enum surface_update_type update_type) 2661 { 2662 enum surface_update_type new_update_type = update_type; 2663 int i, j; 2664 struct pipe_ctx *pipe = NULL; 2665 struct dc_stream_state *stream; 2666 2667 /* Check that we are in windowed MPO with ODM 2668 * - look for MPO pipe by scanning pipes for first pipe matching 2669 * surface that has moved ( position change ) 2670 * - MPO pipe will have top pipe 2671 * - check that top pipe has ODM pointer 2672 */ 2673 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) { 2674 for (i = 0; i < surface_count; i++) { 2675 if (srf_updates[i].surface && srf_updates[i].scaling_info 2676 && srf_updates[i].surface->update_flags.bits.position_change) { 2677 2678 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2679 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) { 2680 pipe = &dc->current_state->res_ctx.pipe_ctx[j]; 2681 stream = pipe->stream; 2682 break; 2683 } 2684 } 2685 2686 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream 2687 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) { 2688 struct rect old_clip_rect, new_clip_rect; 2689 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle; 2690 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle; 2691 2692 old_clip_rect = srf_updates[i].surface->clip_rect; 2693 new_clip_rect = srf_updates[i].scaling_info->clip_rect; 2694 2695 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2696 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2697 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right; 2698 2699 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2))); 2700 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2))); 2701 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right; 2702 2703 if (old_clip_rect_left && new_clip_rect_middle) 2704 new_update_type = UPDATE_TYPE_FULL; 2705 else if (old_clip_rect_middle && new_clip_rect_right) 2706 new_update_type = UPDATE_TYPE_FULL; 2707 else if (old_clip_rect_right && new_clip_rect_middle) 2708 new_update_type = UPDATE_TYPE_FULL; 2709 else if (old_clip_rect_middle && new_clip_rect_left) 2710 new_update_type = UPDATE_TYPE_FULL; 2711 } 2712 } 2713 } 2714 } 2715 return new_update_type; 2716 } 2717 2718 /* 2719 * 
dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2720 * 2721 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2722 */ 2723 enum surface_update_type dc_check_update_surfaces_for_stream( 2724 struct dc *dc, 2725 struct dc_surface_update *updates, 2726 int surface_count, 2727 struct dc_stream_update *stream_update, 2728 const struct dc_stream_status *stream_status) 2729 { 2730 int i; 2731 enum surface_update_type type; 2732 2733 if (stream_update) 2734 stream_update->stream->update_flags.raw = 0; 2735 for (i = 0; i < surface_count; i++) 2736 updates[i].surface->update_flags.raw = 0; 2737 2738 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2739 if (type == UPDATE_TYPE_FULL) { 2740 if (stream_update) { 2741 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2742 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2743 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2744 } 2745 for (i = 0; i < surface_count; i++) 2746 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2747 } 2748 2749 if (type == UPDATE_TYPE_MED) 2750 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc, 2751 updates, surface_count, type); 2752 2753 if (type == UPDATE_TYPE_FAST) { 2754 // If there's an available clock comparator, we use that. 2755 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2756 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2757 dc->optimized_required = true; 2758 // Else we fallback to mem compare. 2759 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2760 dc->optimized_required = true; 2761 } 2762 2763 dc->optimized_required |= dc->wm_optimized_required; 2764 } 2765 2766 return type; 2767 } 2768 2769 static struct dc_stream_status *stream_get_status( 2770 struct dc_state *ctx, 2771 struct dc_stream_state *stream) 2772 { 2773 uint8_t i; 2774 2775 for (i = 0; i < ctx->stream_count; i++) { 2776 if (stream == ctx->streams[i]) { 2777 return &ctx->stream_status[i]; 2778 } 2779 } 2780 2781 return NULL; 2782 } 2783 2784 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2785 2786 static void copy_surface_update_to_plane( 2787 struct dc_plane_state *surface, 2788 struct dc_surface_update *srf_update) 2789 { 2790 if (srf_update->flip_addr) { 2791 surface->address = srf_update->flip_addr->address; 2792 surface->flip_immediate = 2793 srf_update->flip_addr->flip_immediate; 2794 surface->time.time_elapsed_in_us[surface->time.index] = 2795 srf_update->flip_addr->flip_timestamp_in_us - 2796 surface->time.prev_update_time_in_us; 2797 surface->time.prev_update_time_in_us = 2798 srf_update->flip_addr->flip_timestamp_in_us; 2799 surface->time.index++; 2800 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2801 surface->time.index = 0; 2802 2803 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2804 } 2805 2806 if (srf_update->scaling_info) { 2807 surface->scaling_quality = 2808 srf_update->scaling_info->scaling_quality; 2809 surface->dst_rect = 2810 srf_update->scaling_info->dst_rect; 2811 surface->src_rect = 2812 srf_update->scaling_info->src_rect; 2813 surface->clip_rect = 2814 srf_update->scaling_info->clip_rect; 2815 } 2816 2817 if (srf_update->plane_info) { 2818 surface->color_space = 2819 
srf_update->plane_info->color_space; 2820 surface->format = 2821 srf_update->plane_info->format; 2822 surface->plane_size = 2823 srf_update->plane_info->plane_size; 2824 surface->rotation = 2825 srf_update->plane_info->rotation; 2826 surface->horizontal_mirror = 2827 srf_update->plane_info->horizontal_mirror; 2828 surface->stereo_format = 2829 srf_update->plane_info->stereo_format; 2830 surface->tiling_info = 2831 srf_update->plane_info->tiling_info; 2832 surface->visible = 2833 srf_update->plane_info->visible; 2834 surface->per_pixel_alpha = 2835 srf_update->plane_info->per_pixel_alpha; 2836 surface->global_alpha = 2837 srf_update->plane_info->global_alpha; 2838 surface->global_alpha_value = 2839 srf_update->plane_info->global_alpha_value; 2840 surface->dcc = 2841 srf_update->plane_info->dcc; 2842 surface->layer_index = 2843 srf_update->plane_info->layer_index; 2844 } 2845 2846 if (srf_update->gamma && 2847 (surface->gamma_correction != 2848 srf_update->gamma)) { 2849 memcpy(&surface->gamma_correction->entries, 2850 &srf_update->gamma->entries, 2851 sizeof(struct dc_gamma_entries)); 2852 surface->gamma_correction->is_identity = 2853 srf_update->gamma->is_identity; 2854 surface->gamma_correction->num_entries = 2855 srf_update->gamma->num_entries; 2856 surface->gamma_correction->type = 2857 srf_update->gamma->type; 2858 } 2859 2860 if (srf_update->in_transfer_func && 2861 (surface->in_transfer_func != 2862 srf_update->in_transfer_func)) { 2863 surface->in_transfer_func->sdr_ref_white_level = 2864 srf_update->in_transfer_func->sdr_ref_white_level; 2865 surface->in_transfer_func->tf = 2866 srf_update->in_transfer_func->tf; 2867 surface->in_transfer_func->type = 2868 srf_update->in_transfer_func->type; 2869 memcpy(&surface->in_transfer_func->tf_pts, 2870 &srf_update->in_transfer_func->tf_pts, 2871 sizeof(struct dc_transfer_func_distributed_points)); 2872 } 2873 2874 if (srf_update->func_shaper && 2875 (surface->in_shaper_func != 2876 srf_update->func_shaper)) 2877 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2878 sizeof(*surface->in_shaper_func)); 2879 2880 if (srf_update->lut3d_func && 2881 (surface->lut3d_func != 2882 srf_update->lut3d_func)) 2883 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2884 sizeof(*surface->lut3d_func)); 2885 2886 if (srf_update->hdr_mult.value) 2887 surface->hdr_mult = 2888 srf_update->hdr_mult; 2889 2890 if (srf_update->blend_tf && 2891 (surface->blend_tf != 2892 srf_update->blend_tf)) 2893 memcpy(surface->blend_tf, srf_update->blend_tf, 2894 sizeof(*surface->blend_tf)); 2895 2896 if (srf_update->input_csc_color_matrix) 2897 surface->input_csc_color_matrix = 2898 *srf_update->input_csc_color_matrix; 2899 2900 if (srf_update->coeff_reduction_factor) 2901 surface->coeff_reduction_factor = 2902 *srf_update->coeff_reduction_factor; 2903 2904 if (srf_update->gamut_remap_matrix) 2905 surface->gamut_remap_matrix = 2906 *srf_update->gamut_remap_matrix; 2907 } 2908 2909 static void copy_stream_update_to_stream(struct dc *dc, 2910 struct dc_state *context, 2911 struct dc_stream_state *stream, 2912 struct dc_stream_update *update) 2913 { 2914 struct dc_context *dc_ctx = dc->ctx; 2915 2916 if (update == NULL || stream == NULL) 2917 return; 2918 2919 if (update->src.height && update->src.width) 2920 stream->src = update->src; 2921 2922 if (update->dst.height && update->dst.width) 2923 stream->dst = update->dst; 2924 2925 if (update->out_transfer_func && 2926 stream->out_transfer_func != update->out_transfer_func) { 2927 
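/* The stream keeps ownership of its transfer function object, so the
 * update's contents are copied field by field rather than swapping the
 * pointer.
 */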
stream->out_transfer_func->sdr_ref_white_level = 2928 update->out_transfer_func->sdr_ref_white_level; 2929 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2930 stream->out_transfer_func->type = 2931 update->out_transfer_func->type; 2932 memcpy(&stream->out_transfer_func->tf_pts, 2933 &update->out_transfer_func->tf_pts, 2934 sizeof(struct dc_transfer_func_distributed_points)); 2935 } 2936 2937 if (update->hdr_static_metadata) 2938 stream->hdr_static_metadata = *update->hdr_static_metadata; 2939 2940 if (update->abm_level) 2941 stream->abm_level = *update->abm_level; 2942 2943 if (update->periodic_interrupt) 2944 stream->periodic_interrupt = *update->periodic_interrupt; 2945 2946 if (update->gamut_remap) 2947 stream->gamut_remap_matrix = *update->gamut_remap; 2948 2949 /* Note: this being updated after mode set is currently not a use case; 2950 * however, if it arises, OCSC would need to be reprogrammed at a 2951 * minimum 2952 */ 2953 if (update->output_color_space) 2954 stream->output_color_space = *update->output_color_space; 2955 2956 if (update->output_csc_transform) 2957 stream->csc_color_matrix = *update->output_csc_transform; 2958 2959 if (update->vrr_infopacket) 2960 stream->vrr_infopacket = *update->vrr_infopacket; 2961 2962 if (update->allow_freesync) 2963 stream->allow_freesync = *update->allow_freesync; 2964 2965 if (update->vrr_active_variable) 2966 stream->vrr_active_variable = *update->vrr_active_variable; 2967 2968 if (update->crtc_timing_adjust) 2969 stream->adjust = *update->crtc_timing_adjust; 2970 2971 if (update->dpms_off) 2972 stream->dpms_off = *update->dpms_off; 2973 2974 if (update->hfvsif_infopacket) 2975 stream->hfvsif_infopacket = *update->hfvsif_infopacket; 2976 2977 if (update->vtem_infopacket) 2978 stream->vtem_infopacket = *update->vtem_infopacket; 2979 2980 if (update->vsc_infopacket) 2981 stream->vsc_infopacket = *update->vsc_infopacket; 2982 2983 if (update->vsp_infopacket) 2984 stream->vsp_infopacket = *update->vsp_infopacket; 2985 2986 if (update->adaptive_sync_infopacket) 2987 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket; 2988 2989 if (update->dither_option) 2990 stream->dither_option = *update->dither_option; 2991 2992 if (update->pending_test_pattern) 2993 stream->test_pattern = *update->pending_test_pattern; 2994 /* update current stream with writeback info */ 2995 if (update->wb_update) { 2996 int i; 2997 2998 stream->num_wb_info = update->wb_update->num_wb_info; 2999 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 3000 for (i = 0; i < stream->num_wb_info; i++) 3001 stream->writeback_info[i] = 3002 update->wb_update->writeback_info[i]; 3003 } 3004 if (update->dsc_config) { 3005 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 3006 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 3007 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 3008 update->dsc_config->num_slices_v != 0); 3009 3010 /* Use temporary context for validating new DSC config */ 3011 struct dc_state *dsc_validate_context = dc_create_state(dc); 3012 3013 if (dsc_validate_context) { 3014 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 3015 3016 stream->timing.dsc_cfg = *update->dsc_config; 3017 stream->timing.flags.DSC = enable_dsc; 3018 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 3019 stream->timing.dsc_cfg = old_dsc_cfg; 3020 stream->timing.flags.DSC = old_dsc_enabled; 3021 update->dsc_config = NULL; 3022 } 3023 3024 dc_release_state(dsc_validate_context);
3025 } else { 3026 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 3027 update->dsc_config = NULL; 3028 } 3029 } 3030 } 3031 3032 static bool update_planes_and_stream_state(struct dc *dc, 3033 struct dc_surface_update *srf_updates, int surface_count, 3034 struct dc_stream_state *stream, 3035 struct dc_stream_update *stream_update, 3036 enum surface_update_type *new_update_type, 3037 struct dc_state **new_context) 3038 { 3039 struct dc_state *context; 3040 int i, j; 3041 enum surface_update_type update_type; 3042 const struct dc_stream_status *stream_status; 3043 struct dc_context *dc_ctx = dc->ctx; 3044 3045 stream_status = dc_stream_get_status(stream); 3046 3047 if (!stream_status) { 3048 if (surface_count) /* Only an error condition if surf_count non-zero*/ 3049 ASSERT(false); 3050 3051 return false; /* Cannot commit surface to stream that is not committed */ 3052 } 3053 3054 context = dc->current_state; 3055 3056 update_type = dc_check_update_surfaces_for_stream( 3057 dc, srf_updates, surface_count, stream_update, stream_status); 3058 3059 /* update current stream with the new updates */ 3060 copy_stream_update_to_stream(dc, context, stream, stream_update); 3061 3062 /* do not perform surface update if surface has invalid dimensions 3063 * (all zero) and no scaling_info is provided 3064 */ 3065 if (surface_count > 0) { 3066 for (i = 0; i < surface_count; i++) { 3067 if ((srf_updates[i].surface->src_rect.width == 0 || 3068 srf_updates[i].surface->src_rect.height == 0 || 3069 srf_updates[i].surface->dst_rect.width == 0 || 3070 srf_updates[i].surface->dst_rect.height == 0) && 3071 (!srf_updates[i].scaling_info || 3072 srf_updates[i].scaling_info->src_rect.width == 0 || 3073 srf_updates[i].scaling_info->src_rect.height == 0 || 3074 srf_updates[i].scaling_info->dst_rect.width == 0 || 3075 srf_updates[i].scaling_info->dst_rect.height == 0)) { 3076 DC_ERROR("Invalid src/dst rects in surface update!\n"); 3077 return false; 3078 } 3079 } 3080 } 3081 3082 if (update_type >= update_surface_trace_level) 3083 update_surface_trace(dc, srf_updates, surface_count); 3084 3085 if (update_type >= UPDATE_TYPE_FULL) { 3086 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3087 3088 for (i = 0; i < surface_count; i++) 3089 new_planes[i] = srf_updates[i].surface; 3090 3091 /* initialize scratch memory for building context */ 3092 context = dc_create_state(dc); 3093 if (context == NULL) { 3094 DC_ERROR("Failed to allocate new validate context!\n"); 3095 return false; 3096 } 3097 3098 dc_resource_state_copy_construct( 3099 dc->current_state, context); 3100 3101 /* For each full update, remove all existing phantom pipes first. 
3102 * Ensures that we have enough pipes for newly added MPO planes 3103 */ 3104 if (dc->res_pool->funcs->remove_phantom_pipes) 3105 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3106 3107 /*remove old surfaces from context */ 3108 if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3109 3110 BREAK_TO_DEBUGGER(); 3111 goto fail; 3112 } 3113 3114 /* add surface to context */ 3115 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3116 3117 BREAK_TO_DEBUGGER(); 3118 goto fail; 3119 } 3120 } 3121 3122 /* save update parameters into surface */ 3123 for (i = 0; i < surface_count; i++) { 3124 struct dc_plane_state *surface = srf_updates[i].surface; 3125 3126 copy_surface_update_to_plane(surface, &srf_updates[i]); 3127 3128 if (update_type >= UPDATE_TYPE_MED) { 3129 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3130 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3131 3132 if (pipe_ctx->plane_state != surface) 3133 continue; 3134 3135 resource_build_scaling_params(pipe_ctx); 3136 } 3137 } 3138 } 3139 3140 if (update_type == UPDATE_TYPE_FULL) { 3141 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3142 /* For phantom pipes we remove and create a new set of phantom pipes 3143 * for each full update (because we don't know if we'll need phantom 3144 * pipes until after the first round of validation). However, if validation 3145 * fails we need to keep the existing phantom pipes (because we don't update 3146 * the dc->current_state). 3147 * 3148 * The phantom stream/plane refcount is decremented for validation because 3149 * we assume it'll be removed (the free comes when the dc_state is freed), 3150 * but if validation fails we have to increment back the refcount so it's 3151 * consistent. 
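The retain_phantom_pipes() call below provides that compensating reference.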
3152 */ 3153 if (dc->res_pool->funcs->retain_phantom_pipes) 3154 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state); 3155 BREAK_TO_DEBUGGER(); 3156 goto fail; 3157 } 3158 } 3159 3160 *new_context = context; 3161 *new_update_type = update_type; 3162 3163 return true; 3164 3165 fail: 3166 dc_release_state(context); 3167 3168 return false; 3169 3170 } 3171 3172 static void commit_planes_do_stream_update(struct dc *dc, 3173 struct dc_stream_state *stream, 3174 struct dc_stream_update *stream_update, 3175 enum surface_update_type update_type, 3176 struct dc_state *context) 3177 { 3178 int j; 3179 3180 // Stream updates 3181 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3182 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3183 3184 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 3185 3186 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt) 3187 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx); 3188 3189 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 3190 stream_update->vrr_infopacket || 3191 stream_update->vsc_infopacket || 3192 stream_update->vsp_infopacket || 3193 stream_update->hfvsif_infopacket || 3194 stream_update->adaptive_sync_infopacket || 3195 stream_update->vtem_infopacket) { 3196 resource_build_info_frame(pipe_ctx); 3197 dc->hwss.update_info_frame(pipe_ctx); 3198 3199 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 3200 dc->link_srv->dp_trace_source_sequence( 3201 pipe_ctx->stream->link, 3202 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 3203 } 3204 3205 if (stream_update->hdr_static_metadata && 3206 stream->use_dynamic_meta && 3207 dc->hwss.set_dmdata_attributes && 3208 pipe_ctx->stream->dmdata_address.quad_part != 0) 3209 dc->hwss.set_dmdata_attributes(pipe_ctx); 3210 3211 if (stream_update->gamut_remap) 3212 dc_stream_set_gamut_remap(dc, stream); 3213 3214 if (stream_update->output_csc_transform) 3215 dc_stream_program_csc_matrix(dc, stream); 3216 3217 if (stream_update->dither_option) { 3218 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 3219 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 3220 &pipe_ctx->stream->bit_depth_params); 3221 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 3222 &stream->bit_depth_params, 3223 &stream->clamping); 3224 while (odm_pipe) { 3225 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 3226 &stream->bit_depth_params, 3227 &stream->clamping); 3228 odm_pipe = odm_pipe->next_odm_pipe; 3229 } 3230 } 3231 3232 3233 /* Full fe update*/ 3234 if (update_type == UPDATE_TYPE_FAST) 3235 continue; 3236 3237 if (stream_update->dsc_config) 3238 dc->link_srv->update_dsc_config(pipe_ctx); 3239 3240 if (stream_update->mst_bw_update) { 3241 if (stream_update->mst_bw_update->is_increase) 3242 dc->link_srv->increase_mst_payload(pipe_ctx, 3243 stream_update->mst_bw_update->mst_stream_bw); 3244 else 3245 dc->link_srv->reduce_mst_payload(pipe_ctx, 3246 stream_update->mst_bw_update->mst_stream_bw); 3247 } 3248 3249 if (stream_update->pending_test_pattern) { 3250 dc_link_dp_set_test_pattern(stream->link, 3251 stream->test_pattern.type, 3252 stream->test_pattern.color_space, 3253 stream->test_pattern.p_link_settings, 3254 stream->test_pattern.p_custom_pattern, 3255 stream->test_pattern.cust_pattern_size); 3256 } 3257 3258 if (stream_update->dpms_off) { 3259 if (*stream_update->dpms_off) { 3260 dc->link_srv->set_dpms_off(pipe_ctx); 3261 /* for dpms, keep acquired resources*/ 3262 if 
(pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 3263 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 3264 3265 dc->optimized_required = true; 3266 3267 } else { 3268 if (get_seamless_boot_stream_count(context) == 0) 3269 dc->hwss.prepare_bandwidth(dc, dc->current_state); 3270 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx); 3271 } 3272 } 3273 3274 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 3275 bool should_program_abm = true; 3276 3277 // if otg funcs defined check if blanked before programming 3278 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 3279 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 3280 should_program_abm = false; 3281 3282 if (should_program_abm) { 3283 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 3284 dc->hwss.set_abm_immediate_disable(pipe_ctx); 3285 } else { 3286 pipe_ctx->stream_res.abm->funcs->set_abm_level( 3287 pipe_ctx->stream_res.abm, stream->abm_level); 3288 } 3289 } 3290 } 3291 } 3292 } 3293 } 3294 3295 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3296 { 3297 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3298 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3299 && stream->ctx->dce_version >= DCN_VERSION_3_1) 3300 return true; 3301 3302 return false; 3303 } 3304 3305 void dc_dmub_update_dirty_rect(struct dc *dc, 3306 int surface_count, 3307 struct dc_stream_state *stream, 3308 struct dc_surface_update *srf_updates, 3309 struct dc_state *context) 3310 { 3311 union dmub_rb_cmd cmd; 3312 struct dc_context *dc_ctx = dc->ctx; 3313 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3314 unsigned int i, j; 3315 unsigned int panel_inst = 0; 3316 3317 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream)) 3318 return; 3319 3320 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst)) 3321 return; 3322 3323 memset(&cmd, 0x0, sizeof(cmd)); 3324 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT; 3325 cmd.update_dirty_rect.header.sub_type = 0; 3326 cmd.update_dirty_rect.header.payload_bytes = 3327 sizeof(cmd.update_dirty_rect) - 3328 sizeof(cmd.update_dirty_rect.header); 3329 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data; 3330 for (i = 0; i < surface_count; i++) { 3331 struct dc_plane_state *plane_state = srf_updates[i].surface; 3332 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr; 3333 3334 if (!srf_updates[i].surface || !flip_addr) 3335 continue; 3336 /* Do not send in immediate flip mode */ 3337 if (srf_updates[i].surface->flip_immediate) 3338 continue; 3339 3340 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3341 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3342 sizeof(flip_addr->dirty_rects)); 3343 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3344 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3345 3346 if (pipe_ctx->stream != stream) 3347 continue; 3348 if (pipe_ctx->plane_state != plane_state) 3349 continue; 3350 3351 update_dirty_rect->panel_inst = panel_inst; 3352 update_dirty_rect->pipe_idx = j; 3353 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); 3354 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); 3355 } 3356 } 3357 } 3358 3359 static void commit_planes_for_stream(struct dc *dc, 3360 struct dc_surface_update *srf_updates, 3361 int surface_count, 3362 struct dc_stream_state *stream, 3363 struct dc_stream_update *stream_update, 3364 enum surface_update_type 
update_type, 3365 struct dc_state *context) 3366 { 3367 int i, j; 3368 struct pipe_ctx *top_pipe_to_program = NULL; 3369 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3370 bool subvp_prev_use = false; 3371 bool subvp_curr_use = false; 3372 3373 // Once we apply the new subvp context to hardware it won't be in the 3374 // dc->current_state anymore, so we have to cache it before we apply 3375 // the new SubVP context 3376 subvp_prev_use = false; 3377 3378 3379 dc_z10_restore(dc); 3380 3381 if (update_type == UPDATE_TYPE_FULL) { 3382 /* wait for all double-buffer activity to clear on all pipes */ 3383 int pipe_idx; 3384 3385 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) { 3386 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; 3387 3388 if (!pipe_ctx->stream) 3389 continue; 3390 3391 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear) 3392 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg); 3393 } 3394 } 3395 3396 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 3397 /* Optimize seamless boot flag keeps clocks and watermarks high until 3398 * first flip. After first flip, optimization is required to lower 3399 * bandwidth. Important to note that it is expected UEFI will 3400 * only light up a single display on POST, therefore we only expect 3401 * one stream with seamless boot flag set. 3402 */ 3403 if (stream->apply_seamless_boot_optimization) { 3404 stream->apply_seamless_boot_optimization = false; 3405 3406 if (get_seamless_boot_stream_count(context) == 0) 3407 dc->optimized_required = true; 3408 } 3409 } 3410 3411 if (update_type == UPDATE_TYPE_FULL) { 3412 dc_allow_idle_optimizations(dc, false); 3413 3414 if (get_seamless_boot_stream_count(context) == 0) 3415 dc->hwss.prepare_bandwidth(dc, context); 3416 3417 if (dc->debug.enable_double_buffered_dsc_pg_support) 3418 dc->hwss.update_dsc_pg(dc, context, false); 3419 3420 context_clock_trace(dc, context); 3421 } 3422 3423 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3424 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3425 3426 if (!pipe_ctx->top_pipe && 3427 !pipe_ctx->prev_odm_pipe && 3428 pipe_ctx->stream && 3429 pipe_ctx->stream == stream) { 3430 top_pipe_to_program = pipe_ctx; 3431 } 3432 } 3433 3434 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3435 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3436 3437 // Check old context for SubVP 3438 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3439 if (subvp_prev_use) 3440 break; 3441 } 3442 3443 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3444 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3445 3446 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3447 subvp_curr_use = true; 3448 break; 3449 } 3450 } 3451 3452 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 3453 struct pipe_ctx *mpcc_pipe; 3454 struct pipe_ctx *odm_pipe; 3455 3456 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 3457 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 3458 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 3459 } 3460 3461 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3462 if (top_pipe_to_program && 3463 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3464 if (should_use_dmub_lock(stream->link)) { 3465 union 
dmub_hw_lock_flags hw_locks = { 0 }; 3466 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3467 3468 hw_locks.bits.lock_dig = 1; 3469 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3470 3471 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3472 true, 3473 &hw_locks, 3474 &inst_flags); 3475 } else 3476 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 3477 top_pipe_to_program->stream_res.tg); 3478 } 3479 3480 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3481 if (dc->hwss.subvp_pipe_control_lock) 3482 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3483 dc->hwss.interdependent_update_lock(dc, context, true); 3484 3485 } else { 3486 if (dc->hwss.subvp_pipe_control_lock) 3487 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3488 /* Lock the top pipe while updating plane addrs, since freesync requires 3489 * plane addr update event triggers to be synchronized. 3490 * top_pipe_to_program is expected to never be NULL 3491 */ 3492 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 3493 } 3494 3495 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context); 3496 3497 // Stream updates 3498 if (stream_update) 3499 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 3500 3501 if (surface_count == 0) { 3502 /* 3503 * In case of turning off screen, no need to program front end a second time. 3504 * just return after program blank. 3505 */ 3506 if (dc->hwss.apply_ctx_for_surface) 3507 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 3508 if (dc->hwss.program_front_end_for_ctx) 3509 dc->hwss.program_front_end_for_ctx(dc, context); 3510 3511 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3512 dc->hwss.interdependent_update_lock(dc, context, false); 3513 } else { 3514 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3515 } 3516 dc->hwss.post_unlock_program_front_end(dc, context); 3517 3518 if (update_type != UPDATE_TYPE_FAST) 3519 if (dc->hwss.commit_subvp_config) 3520 dc->hwss.commit_subvp_config(dc, context); 3521 3522 /* Since phantom pipe programming is moved to post_unlock_program_front_end, 3523 * move the SubVP lock to after the phantom pipes have been setup 3524 */ 3525 if (dc->hwss.subvp_pipe_control_lock) 3526 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 3527 NULL, subvp_prev_use); 3528 return; 3529 } 3530 3531 if (update_type != UPDATE_TYPE_FAST) { 3532 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3533 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3534 3535 if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP && 3536 pipe_ctx->stream && pipe_ctx->plane_state) { 3537 /* Only update visual confirm for SUBVP here. 3538 * The bar appears on all pipes, so we need to update the bar on all displays, 3539 * so the information doesn't get stale. 
3540 */ 3541 struct mpcc_blnd_cfg blnd_cfg = { 0 }; 3542 3543 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, 3544 pipe_ctx->plane_res.hubp->inst); 3545 } 3546 } 3547 } 3548 3549 if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 3550 for (i = 0; i < surface_count; i++) { 3551 struct dc_plane_state *plane_state = srf_updates[i].surface; 3552 /*set logical flag for lock/unlock use*/ 3553 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3554 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3555 if (!pipe_ctx->plane_state) 3556 continue; 3557 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3558 continue; 3559 pipe_ctx->plane_state->triplebuffer_flips = false; 3560 if (update_type == UPDATE_TYPE_FAST && 3561 dc->hwss.program_triplebuffer != NULL && 3562 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3563 /*triple buffer for VUpdate only*/ 3564 pipe_ctx->plane_state->triplebuffer_flips = true; 3565 } 3566 } 3567 if (update_type == UPDATE_TYPE_FULL) { 3568 /* force vsync flip when reconfiguring pipes to prevent underflow */ 3569 plane_state->flip_immediate = false; 3570 } 3571 } 3572 } 3573 3574 // Update Type FULL, Surface updates 3575 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3576 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3577 3578 if (!pipe_ctx->top_pipe && 3579 !pipe_ctx->prev_odm_pipe && 3580 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 3581 struct dc_stream_status *stream_status = NULL; 3582 3583 if (!pipe_ctx->plane_state) 3584 continue; 3585 3586 /* Full fe update*/ 3587 if (update_type == UPDATE_TYPE_FAST) 3588 continue; 3589 3590 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 3591 3592 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3593 /*turn off triple buffer for full update*/ 3594 dc->hwss.program_triplebuffer( 3595 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3596 } 3597 stream_status = 3598 stream_get_status(context, pipe_ctx->stream); 3599 3600 if (dc->hwss.apply_ctx_for_surface) 3601 dc->hwss.apply_ctx_for_surface( 3602 dc, pipe_ctx->stream, stream_status->plane_count, context); 3603 } 3604 } 3605 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 3606 dc->hwss.program_front_end_for_ctx(dc, context); 3607 if (dc->debug.validate_dml_output) { 3608 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3609 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 3610 if (cur_pipe->stream == NULL) 3611 continue; 3612 3613 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3614 cur_pipe->plane_res.hubp, dc->ctx, 3615 &context->res_ctx.pipe_ctx[i].rq_regs, 3616 &context->res_ctx.pipe_ctx[i].dlg_regs, 3617 &context->res_ctx.pipe_ctx[i].ttu_regs); 3618 } 3619 } 3620 } 3621 3622 // Update Type FAST, Surface updates 3623 if (update_type == UPDATE_TYPE_FAST) { 3624 if (dc->hwss.set_flip_control_gsl) 3625 for (i = 0; i < surface_count; i++) { 3626 struct dc_plane_state *plane_state = srf_updates[i].surface; 3627 3628 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3629 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3630 3631 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3632 continue; 3633 3634 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3635 continue; 3636 3637 // GSL has to be used for flip immediate 3638 dc->hwss.set_flip_control_gsl(pipe_ctx, 3639 pipe_ctx->plane_state->flip_immediate); 3640 } 3641 } 3642 3643 /* Perform requested Updates */ 3644 for (i = 
0; i < surface_count; i++) { 3645 struct dc_plane_state *plane_state = srf_updates[i].surface; 3646 3647 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3648 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3649 3650 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3651 continue; 3652 3653 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3654 continue; 3655 3656 /*program triple buffer after lock based on flip type*/ 3657 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3658 /*only enable triplebuffer for fast_update*/ 3659 dc->hwss.program_triplebuffer( 3660 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3661 } 3662 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3663 dc->hwss.update_plane_addr(dc, pipe_ctx); 3664 } 3665 } 3666 } 3667 3668 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3669 dc->hwss.interdependent_update_lock(dc, context, false); 3670 } else { 3671 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3672 } 3673 3674 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3675 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3676 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3677 top_pipe_to_program->stream_res.tg, 3678 CRTC_STATE_VACTIVE); 3679 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3680 top_pipe_to_program->stream_res.tg, 3681 CRTC_STATE_VBLANK); 3682 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3683 top_pipe_to_program->stream_res.tg, 3684 CRTC_STATE_VACTIVE); 3685 3686 if (should_use_dmub_lock(stream->link)) { 3687 union dmub_hw_lock_flags hw_locks = { 0 }; 3688 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3689 3690 hw_locks.bits.lock_dig = 1; 3691 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3692 3693 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3694 false, 3695 &hw_locks, 3696 &inst_flags); 3697 } else 3698 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3699 top_pipe_to_program->stream_res.tg); 3700 } 3701 3702 if (subvp_curr_use) { 3703 /* If enabling subvp or transitioning from subvp->subvp, enable the 3704 * phantom streams before we program front end for the phantom pipes. 3705 */ 3706 if (update_type != UPDATE_TYPE_FAST) { 3707 if (dc->hwss.enable_phantom_streams) 3708 dc->hwss.enable_phantom_streams(dc, context); 3709 } 3710 } 3711 3712 if (update_type != UPDATE_TYPE_FAST) 3713 dc->hwss.post_unlock_program_front_end(dc, context); 3714 3715 if (subvp_prev_use && !subvp_curr_use) { 3716 /* If disabling subvp, disable phantom streams after front end 3717 * programming has completed (we turn on phantom OTG in order 3718 * to complete the plane disable for phantom pipes). 
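Re-applying the full context below brings the hardware back in line with the new, phantom-free state.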
	if (subvp_prev_use && !subvp_curr_use) {
		/* If disabling subvp, disable phantom streams after front end
		 * programming has completed (we turn on phantom OTG in order
		 * to complete the plane disable for phantom pipes).
		 */
		dc->hwss.apply_ctx_to_hw(dc, context);
	}

	if (update_type != UPDATE_TYPE_FAST)
		if (dc->hwss.commit_subvp_config)
			dc->hwss.commit_subvp_config(dc, context);
	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
	 * move the SubVP lock to after the phantom pipes have been set up
	 */
	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
	} else {
		if (dc->hwss.subvp_pipe_control_lock)
			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
	}

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
		    !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
		    !pipe_ctx->plane_state->update_flags.bits.addr_update ||
		    pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}
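/*
 * Illustrative sketch (not an additional API): the ordering that
 * commit_planes_for_stream() above walks through, written out as
 * pseudo-calls. The helper names in this comment are shorthand only.
 *
 *	lock(top_pipe or all pipes);		// pipe_control_lock / interdependent_update_lock
 *	program_surfaces(FULL or FAST path);	// front end, triple buffering, plane addresses
 *	unlock(top_pipe or all pipes);
 *	post_unlock_program_front_end();	// non-FAST updates only
 *	release_subvp_lock();			// after phantom pipes are set up
 *	program_manual_trigger();		// only for flipped bottom planes
 */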
/**
 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
 *
 * @dc: Used to get the current state status
 * @stream: Target stream, from which we want to remove the attached planes
 * @surface_count: Number of surface updates
 * @is_plane_addition: [out] Fill out with true if it is a plane addition case
 *
 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
 * the MPO if used simultaneously in some specific configurations (e.g.,
 * 4k@144). To avoid that edge case, this function checks whether the incoming
 * context requires applying a transition state with unnecessary pipe splitting
 * and ODM disabled in order to work around our hardware limitations. If the
 * OPP associated with an MPCC might change due to plane additions, this
 * function returns true.
 *
 * Return:
 * Return true if OPP and MPCC might change, otherwise, return false.
 */
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
		struct dc_stream_state *stream,
		int surface_count,
		bool *is_plane_addition)
{
	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
	bool force_minimal_pipe_splitting = false;
	bool subvp_active = false;
	uint32_t i;

	*is_plane_addition = false;

	if (cur_stream_status &&
	    dc->current_state->stream_count > 0 &&
	    dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
		/* determine if minimal transition is required due to MPC */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	if (cur_stream_status &&
	    dc->current_state->stream_count == 1 &&
	    dc->debug.enable_single_display_2to1_odm_policy) {
		/* determine if minimal transition is required due to dynamic ODM */
		if (surface_count > 0) {
			if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
				force_minimal_pipe_splitting = true;
			} else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
				force_minimal_pipe_splitting = true;
				*is_plane_addition = true;
			}
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
			subvp_active = true;
			break;
		}
	}

	/* For SubVP when adding or removing planes we need to add a minimal transition
	 * (even when disabling all planes). Whenever disabling a phantom pipe, we
	 * must use the minimal transition path to disable the pipe correctly.
	 *
	 * We want to use the minimal transition whenever subvp is active, not only if
	 * a plane is being added / removed from a subvp stream (an MPO plane can be
	 * added to a DRR pipe of a SubVP + DRR config, in which case we still want to
	 * run through a min transition to disable subvp).
	 */
	if (cur_stream_status && subvp_active) {
		/* determine if minimal transition is required due to SubVP */
		if (cur_stream_status->plane_count > surface_count) {
			force_minimal_pipe_splitting = true;
		} else if (cur_stream_status->plane_count < surface_count) {
			force_minimal_pipe_splitting = true;
			*is_plane_addition = true;
		}
	}

	return force_minimal_pipe_splitting;
}
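/*
 * Illustrative decision pattern (mirrors the logic in
 * dc_update_planes_and_stream() further below, not a new API): how the
 * helper above gates the minimal transition commits.
 *
 *	bool addition;
 *
 *	if (could_mpcc_tree_change_for_active_pipes(dc, stream, surface_count, &addition)) {
 *		if (addition)
 *			commit_minimal_transition_state(dc, dc->current_state);
 *		// on removal, the minimal transition is committed on the new
 *		// context instead, after update_planes_and_stream_state()
 *	}
 */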
/**
 * commit_minimal_transition_state - Create a transition pipe split state
 *
 * @dc: Used to get the current state status
 * @transition_base_context: New transition state
 *
 * In some specific configurations, such as pipe split on multi-display with
 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
 * programming when moving to new planes. To mitigate those types of problems,
 * this function adds a transition state that minimizes pipe usage before
 * programming the new configuration. When adding a new plane, the current
 * state requires the fewest pipes, so it is applied without splitting. When
 * removing a plane, the new state requires the fewest pipes, so it is applied
 * without splitting.
 *
 * Return:
 * Return false if something is wrong in the transition state.
 */
static bool commit_minimal_transition_state(struct dc *dc,
		struct dc_state *transition_base_context)
{
	struct dc_state *transition_context = dc_create_state(dc);
	enum pipe_split_policy tmp_mpc_policy;
	bool temp_dynamic_odm_policy;
	bool temp_subvp_policy;
	enum dc_status ret = DC_ERROR_UNEXPECTED;
	unsigned int i, j;
	unsigned int pipe_in_use = 0;
	bool subvp_in_use = false;

	if (!transition_context)
		return false;

	/* Setup:
	 * Store the current ODM and MPC config in some temp variables to be
	 * restored after we commit the transition state.
	 */

	/* check current pipes in use */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state)
			pipe_in_use++;
	}

	/* If SubVP is enabled and we are adding or removing planes from any main subvp
	 * pipe, we must use the minimal transition.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			subvp_in_use = true;
			break;
		}
	}

	/* When the OS adds a new surface while all pipes are already in use by ODM
	 * combine or MPC split, we need commit_minimal_transition_state to transition
	 * safely. After the OS exits MPO, it goes back to using ODM/MPC split across
	 * all pipes, so we need to call it again. Otherwise, return true to skip.
	 *
	 * This reduces the scenarios where dc_commit_state_no_check is used during
	 * flips, especially on MPO entry/exit while DCN still has enough resources.
	 */
	if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
		dc_release_state(transition_context);
		return true;
	}

	if (!dc->config.is_vmin_only_asic) {
		tmp_mpc_policy = dc->debug.pipe_split_policy;
		dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
	dc->debug.enable_single_display_2to1_odm_policy = false;

	temp_subvp_policy = dc->debug.force_disable_subvp;
	dc->debug.force_disable_subvp = true;

	dc_resource_state_copy_construct(transition_base_context, transition_context);

	/* commit minimal state */
	if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
		for (i = 0; i < transition_context->stream_count; i++) {
			struct dc_stream_status *stream_status = &transition_context->stream_status[i];

			for (j = 0; j < stream_status->plane_count; j++) {
				struct dc_plane_state *plane_state = stream_status->plane_states[j];

				/* force vsync flip when reconfiguring pipes to prevent underflow
				 * and corruption
				 */
				plane_state->flip_immediate = false;
			}
		}

		ret = dc_commit_state_no_check(dc, transition_context);
	}

	/* always release as dc_commit_state_no_check retains in good case */
	dc_release_state(transition_context);
	/* TearDown:
	 * Restore original configuration for ODM and MPC.
	 */
	if (!dc->config.is_vmin_only_asic)
		dc->debug.pipe_split_policy = tmp_mpc_policy;

	dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
	dc->debug.force_disable_subvp = temp_subvp_policy;

	if (ret != DC_OK) {
		/* this should never happen */
		BREAK_TO_DEBUGGER();
		return false;
	}

	/* force full surface update */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
			dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
		}
	}

	return true;
}
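/*
 * Illustrative sketch (assumes a DCN3x+ res_pool that implements the MALL
 * save/restore hooks; mirrors dc_update_planes_and_stream() below): a
 * plane-removal commit wraps the minimal transition while preserving the
 * subvp/MALL config, since the shallow state copy clears the phantom flags.
 *
 *	struct mall_temp_config mall_cfg;
 *
 *	if (dc->res_pool->funcs->save_mall_state)
 *		dc->res_pool->funcs->save_mall_state(dc, context, &mall_cfg);
 *	if (!commit_minimal_transition_state(dc, context))
 *		return false;
 *	if (dc->res_pool->funcs->restore_mall_state)
 *		dc->res_pool->funcs->restore_mall_state(dc, context, &mall_cfg);
 */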
bool dc_update_planes_and_stream(struct dc *dc,
		struct dc_surface_update *srf_updates, int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update)
{
	struct dc_state *context;
	enum surface_update_type update_type;
	int i;
	struct mall_temp_config mall_temp_config;

	/* In cases where MPO and split or ODM are used, transitions can
	 * cause underflow. Apply stream configuration with minimal pipe
	 * split first to avoid unsupported transitions for active pipes.
	 */
	bool force_minimal_pipe_splitting;
	bool is_plane_addition;

	force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
			dc,
			stream,
			surface_count,
			&is_plane_addition);

	/* on plane addition, minimal state is the current one */
	if (force_minimal_pipe_splitting && is_plane_addition &&
	    !commit_minimal_transition_state(dc, dc->current_state))
		return false;

	if (!update_planes_and_stream_state(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			&update_type,
			&context))
		return false;

	/* on plane removal, minimal state is the new one */
	if (force_minimal_pipe_splitting && !is_plane_addition) {
		/* Since all phantom pipes are removed in full validation,
		 * we have to save and restore the subvp/mall config when
		 * we do a minimal transition since the flags marking the
		 * pipe as subvp/phantom will be cleared (dc copy constructor
		 * creates a shallow copy).
		 */
		if (dc->res_pool->funcs->save_mall_state)
			dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
		if (!commit_minimal_transition_state(dc, context)) {
			dc_release_state(context);
			return false;
		}
		if (dc->res_pool->funcs->restore_mall_state)
			dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);

		/* If we do a minimal transition with plane removal and the context
		 * has subvp we also have to retain back the phantom stream / planes
		 * since the refcount is decremented as part of the min transition
		 * (we commit a state with no subvp, so the phantom streams / planes
		 * had to be removed).
		 */
		if (dc->res_pool->funcs->retain_phantom_pipes)
			dc->res_pool->funcs->retain_phantom_pipes(dc, context);
		update_type = UPDATE_TYPE_FULL;
	}

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);

	if (dc->current_state != context) {

		/* Since memory free requires elevated IRQL, an interrupt
		 * request is generated by mem free. If this happens
		 * between freeing and reassigning the context, our vsync
		 * interrupt will call into dc and cause a memory
		 * corruption BSOD. Hence, we first reassign the context,
		 * then free the old context.
		 */

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		// clear any forced full updates
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	return true;
}
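/*
 * Illustrative DM-side usage (hypothetical caller, not part of dc): a flip
 * handler filling one surface update and committing it through the path
 * above. new_address and the error handling are placeholders.
 *
 *	struct dc_surface_update srf = { 0 };
 *	struct dc_flip_addrs flip = { 0 };
 *
 *	flip.address = new_address;	// hypothetical, provided by the DM
 *	flip.flip_immediate = false;
 *	srf.surface = plane_state;
 *	srf.flip_addr = &flip;
 *
 *	if (!dc_update_planes_and_stream(dc, &srf, 1, stream, NULL))
 *		DRM_ERROR("flip commit failed\n");
 */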
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	/* TODO: Since changing the commit sequence can have a huge impact,
	 * we decided to only enable it for DCN3x. However, as soon as
	 * we get more confident about this change we'll need to enable
	 * the new sequence for all ASICs.
	 */
	if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
		if (update_type == UPDATE_TYPE_FAST)
			dc_post_update_surfaces_to_stream(dc);

		dc_update_planes_and_stream(dc, srf_updates,
					    surface_count, stream,
					    stream_update);
		return;
	}

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);

	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	} else if (update_type == UPDATE_TYPE_FAST) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 */
		dc_post_update_surfaces_to_stream(dc);
	}

	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
			dc,
			srf_updates,
			surface_count,
			stream,
			stream_update,
			update_type,
			context);
	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* Legacy optimization path for DCE. */
	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
		dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}

	return;
}
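/*
 * Illustrative call-path note (no new API): on DCN3.2+ the function above
 * simply forwards to the new sequence, so both entry points converge:
 *
 *	dc_commit_updates_for_stream()		// legacy entry, pre-DCN3.2 path above
 *	    -> dc_update_planes_and_stream()	// taken directly on DCN3.2+
 *	        -> commit_planes_for_stream()
 */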
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_power_down_on_boot(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
	    dc->hwss.power_down_on_boot)
		dc->hwss.power_down_on_boot(dc);
}

void dc_set_power_state(
		struct dc *dc,
		enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	if (!dc->current_state)
		return;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc_z10_restore(dc);

		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
		    dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		dml = kzalloc(sizeof(struct display_mode_lib),
			      GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
		       sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}
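/*
 * Illustrative DM-side suspend/resume ordering (hypothetical caller, not
 * part of dc): D3 is entered only after all streams are torn down, and D0
 * is restored before links are resumed.
 *
 *	// suspend
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *	// resume
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */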
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		dc->link_srv->resume(dc->links[i]);
}

bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}

/* enable/disable eDP PSR without specifying a stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;
	bool allow_active;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		struct dc_link *link;
		struct dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active) {
				allow_active = true;
				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
					return false;
			} else if (!enable && link->psr_settings.psr_allow_active) {
				allow_active = false;
				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
					return false;
			}
		}
	}

	return true;
}

void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
			return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/* set min memory clock to the min required for current mode, max to maxDPM */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
		dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);

	if (dc->clk_mgr->funcs->set_hard_min_memclk)
		dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);

	if (dc->clk_mgr->funcs->set_hard_max_memclk)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}
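/*
 * Illustrative pairing (hypothetical caller, not part of dc): a DM that
 * pins memclk around a latency-sensitive sequence and releases it after.
 *
 *	dc_lock_memory_clock_frequency(dc);	// min = mode requirement, max = max DPM
 *	// ... latency-sensitive programming ...
 *	dc_unlock_memory_clock_frequency(dc);	// min = lowest DPM, max = highest DPM
 */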
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}

/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: true = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode. The DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/remove the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * Need to hold a dc lock when doing so.
 *
 * Return: none (void function)
 *
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
	unsigned int softMax, maxDPM, funcMin;
	bool p_state_change_support;

	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
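/*
 * Illustrative power-source hook (hypothetical DM callback, not part of dc):
 *
 *	static void on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		// apply the DC-mode soft-max on battery, lift it on AC
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */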
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);

	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}

void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
{
	if (dc->current_state)
		dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
}

/**
 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
 *
 * @dc: [in] dc structure
 *
 * Checks whether DMUB FW supports outbox notifications. If supported, the DM
 * should register the outbox interrupt prior to actually enabling interrupts
 * via dc_enable_dmub_outbox.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 *
 * Sets the port index appropriately for legacy DDC.
 *
 * Return: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
		uint32_t link_index,
		struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
		       payload->data,
		       payload->length);
	}

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	return true;
}
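/*
 * Illustrative async AUX flow (hypothetical DM-side sketch, not part of dc):
 * the call above only submits the request; the reply arrives later as a DMUB
 * outbox notification.
 *
 *	struct aux_payload payload = {
 *		.address = DP_DPCD_REV,	// defined in drm_dp.h
 *		.length = 1,
 *		.write = false,
 *		.i2c_over_aux = false,
 *		.data = buf,		// hypothetical caller-owned buffer
 *	};
 *
 *	if (dc_process_dmub_aux_transfer_async(dc, link->link_index, &payload))
 *		wait_for_aux_reply_notification();	// hypothetical DM helper
 */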
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] aux payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
		uint32_t link_index,
		struct set_config_cmd_payload *payload,
		struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
		uint32_t link_index,
		uint8_t mst_alloc_slots,
		uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, if ret_status is 1 */
	if (cmd.set_config_access.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
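/*
 * Illustrative DPIA MST slot request (hypothetical caller, not part of dc):
 *
 *	uint8_t in_use = 0;
 *	enum dc_status status;
 *
 *	status = dc_process_dmub_set_mst_slots(dc, link->link_index, slots, &in_use);
 *	if (status == DC_NOT_SUPPORTED)
 *		// retry after the previously granted allocation (in_use) drains
 */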
/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
		uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dmub_srv);
	dc_dmub_srv_wait_idle(dmub_srv);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num)
		return;

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}

/**
 * dc_extended_blank_supported - Decide whether extended blank is supported
 *
 * @dc: [in] Current DC state
 *
 * Extended blank is a freesync optimization feature to be enabled in the
 * future. During the extra vblank period gained from freesync, we have the
 * ability to enter z9/z10.
 *
 * Return:
 * Indicate whether extended blank is supported (%true or %false)
 */
bool dc_extended_blank_supported(struct dc *dc)
{
	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
		&& dc->caps.zstate_support && dc->caps.is_apu;
}
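/*
 * Illustrative gate (hypothetical caller, not part of dc): callers can key
 * optimized vblank/z-state programming off the capability check above.
 *
 *	if (dc_extended_blank_supported(dc)) {
 *		// take the freesync-extended vblank into account when
 *		// evaluating z9/z10 entry (sketch only)
 *	}
 */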