1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 */ 24 25 #include <linux/slab.h> 26 #include <linux/mm.h> 27 28 #include "dm_services.h" 29 30 #include "dc.h" 31 32 #include "core_status.h" 33 #include "core_types.h" 34 #include "hw_sequencer.h" 35 #include "dce/dce_hwseq.h" 36 37 #include "resource.h" 38 39 #include "clk_mgr.h" 40 #include "clock_source.h" 41 #include "dc_bios_types.h" 42 43 #include "bios_parser_interface.h" 44 #include "include/irq_service_interface.h" 45 #include "transform.h" 46 #include "dmcu.h" 47 #include "dpp.h" 48 #include "timing_generator.h" 49 #include "abm.h" 50 #include "virtual/virtual_link_encoder.h" 51 52 #include "link_hwss.h" 53 #include "link_encoder.h" 54 55 #include "dc_link_ddc.h" 56 #include "dm_helpers.h" 57 #include "mem_input.h" 58 #include "hubp.h" 59 60 #include "dc_link_dp.h" 61 #include "dc_dmub_srv.h" 62 63 #include "dsc.h" 64 65 #include "vm_helper.h" 66 67 #include "dce/dce_i2c.h" 68 69 #include "dmub/dmub_srv.h" 70 71 #include "dce/dmub_hw_lock_mgr.h" 72 73 #include "dc_trace.h" 74 75 #define CTX \ 76 dc->ctx 77 78 #define DC_LOGGER \ 79 dc->ctx->logger 80 81 static const char DC_BUILD_ID[] = "production-build"; 82 83 /** 84 * DOC: Overview 85 * 86 * DC is the OS-agnostic component of the amdgpu DC driver. 87 * 88 * DC maintains and validates a set of structs representing the state of the 89 * driver and writes that state to AMD hardware 90 * 91 * Main DC HW structs: 92 * 93 * struct dc - The central struct. One per driver. Created on driver load, 94 * destroyed on driver unload. 95 * 96 * struct dc_context - One per driver. 97 * Used as a backpointer by most other structs in dc. 98 * 99 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 100 * plugpoints). Created on driver load, destroyed on driver unload. 101 * 102 * struct dc_sink - One per display. Created on boot or hotplug. 103 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 104 * (the display directly attached). 
It may also have one or more remote
 * sinks (in the Multi-Stream Transport case)
 *
 * struct resource_pool - One per driver. Represents the hw blocks not in the
 * main pipeline. Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed. There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display. Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool. Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context. Represents the
 * internal hardware pipeline components. Each dc_plane_state has either
 * one or two (in the pipe-split case).
130 */ 131 132 /******************************************************************************* 133 * Private functions 134 ******************************************************************************/ 135 136 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) 137 { 138 if (new > *original) 139 *original = new; 140 } 141 142 static void destroy_links(struct dc *dc) 143 { 144 uint32_t i; 145 146 for (i = 0; i < dc->link_count; i++) { 147 if (NULL != dc->links[i]) 148 link_destroy(&dc->links[i]); 149 } 150 } 151 152 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 153 { 154 int i; 155 uint32_t count = 0; 156 157 for (i = 0; i < num_links; i++) { 158 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 159 links[i]->is_internal_display) 160 count++; 161 } 162 163 return count; 164 } 165 166 static bool create_links( 167 struct dc *dc, 168 uint32_t num_virtual_links) 169 { 170 int i; 171 int connectors_num; 172 struct dc_bios *bios = dc->ctx->dc_bios; 173 174 dc->link_count = 0; 175 176 connectors_num = bios->funcs->get_connectors_number(bios); 177 178 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 179 180 if (connectors_num > ENUM_ID_COUNT) { 181 dm_error( 182 "DC: Number of connectors %d exceeds maximum of %d!\n", 183 connectors_num, 184 ENUM_ID_COUNT); 185 return false; 186 } 187 188 dm_output_to_console( 189 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 190 __func__, 191 connectors_num, 192 num_virtual_links); 193 194 for (i = 0; i < connectors_num; i++) { 195 struct link_init_data link_init_params = {0}; 196 struct dc_link *link; 197 198 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 199 200 link_init_params.ctx = dc->ctx; 201 /* next BIOS object table connector */ 202 link_init_params.connector_index = i; 203 link_init_params.link_index = dc->link_count; 204 
link_init_params.dc = dc; 205 link = link_create(&link_init_params); 206 207 if (link) { 208 dc->links[dc->link_count] = link; 209 link->dc = dc; 210 ++dc->link_count; 211 } 212 } 213 214 DC_LOG_DC("BIOS object table - end"); 215 216 for (i = 0; i < num_virtual_links; i++) { 217 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 218 struct encoder_init_data enc_init = {0}; 219 220 if (link == NULL) { 221 BREAK_TO_DEBUGGER(); 222 goto failed_alloc; 223 } 224 225 link->link_index = dc->link_count; 226 dc->links[dc->link_count] = link; 227 dc->link_count++; 228 229 link->ctx = dc->ctx; 230 link->dc = dc; 231 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 232 link->link_id.type = OBJECT_TYPE_CONNECTOR; 233 link->link_id.id = CONNECTOR_ID_VIRTUAL; 234 link->link_id.enum_id = ENUM_ID_1; 235 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 236 237 if (!link->link_enc) { 238 BREAK_TO_DEBUGGER(); 239 goto failed_alloc; 240 } 241 242 link->link_status.dpcd_caps = &link->dpcd_caps; 243 244 enc_init.ctx = dc->ctx; 245 enc_init.channel = CHANNEL_ID_UNKNOWN; 246 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 247 enc_init.transmitter = TRANSMITTER_UNKNOWN; 248 enc_init.connector = link->link_id; 249 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 250 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 251 enc_init.encoder.enum_id = ENUM_ID_1; 252 virtual_link_encoder_construct(link->link_enc, &enc_init); 253 } 254 255 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 256 257 return true; 258 259 failed_alloc: 260 return false; 261 } 262 263 static struct dc_perf_trace *dc_perf_trace_create(void) 264 { 265 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); 266 } 267 268 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) 269 { 270 kfree(*perf_trace); 271 *perf_trace = NULL; 272 } 273 274 /** 275 * dc_stream_adjust_vmin_vmax: 276 * 277 * Looks up the pipe context of dc_stream_state and updates the 278 * 
vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 279 * Rate, which is a power-saving feature that targets reducing panel 280 * refresh rate while the screen is static 281 * 282 * @dc: dc reference 283 * @stream: Initial dc stream state 284 * @adjust: Updated parameters for vertical_total_min and vertical_total_max 285 */ 286 bool dc_stream_adjust_vmin_vmax(struct dc *dc, 287 struct dc_stream_state *stream, 288 struct dc_crtc_timing_adjust *adjust) 289 { 290 int i = 0; 291 bool ret = false; 292 293 stream->adjust = *adjust; 294 295 for (i = 0; i < MAX_PIPES; i++) { 296 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 297 298 if (pipe->stream == stream && pipe->stream_res.tg) { 299 dc->hwss.set_drr(&pipe, 300 1, 301 adjust->v_total_min, 302 adjust->v_total_max, 303 adjust->v_total_mid, 304 adjust->v_total_mid_frame_num); 305 306 ret = true; 307 } 308 } 309 return ret; 310 } 311 312 bool dc_stream_get_crtc_position(struct dc *dc, 313 struct dc_stream_state **streams, int num_streams, 314 unsigned int *v_pos, unsigned int *nom_v_pos) 315 { 316 /* TODO: Support multiple streams */ 317 const struct dc_stream_state *stream = streams[0]; 318 int i = 0; 319 bool ret = false; 320 struct crtc_position position; 321 322 for (i = 0; i < MAX_PIPES; i++) { 323 struct pipe_ctx *pipe = 324 &dc->current_state->res_ctx.pipe_ctx[i]; 325 326 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 327 dc->hwss.get_position(&pipe, 1, &position); 328 329 *v_pos = position.vertical_count; 330 *nom_v_pos = position.nominal_vcount; 331 ret = true; 332 } 333 } 334 return ret; 335 } 336 337 /** 338 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 339 * @dc: DC Object 340 * @stream: The stream to configure CRC on. 341 * @enable: Enable CRC if true, disable otherwise. 342 * @crc_window: CRC window (x/y start/end) information 343 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 344 * once. 
345 * 346 * By default, only CRC0 is configured, and the entire frame is used to 347 * calculate the crc. 348 */ 349 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 350 struct crc_params *crc_window, bool enable, bool continuous) 351 { 352 int i; 353 struct pipe_ctx *pipe; 354 struct crc_params param; 355 struct timing_generator *tg; 356 357 for (i = 0; i < MAX_PIPES; i++) { 358 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 359 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 360 break; 361 } 362 /* Stream not found */ 363 if (i == MAX_PIPES) 364 return false; 365 366 /* By default, capture the full frame */ 367 param.windowa_x_start = 0; 368 param.windowa_y_start = 0; 369 param.windowa_x_end = pipe->stream->timing.h_addressable; 370 param.windowa_y_end = pipe->stream->timing.v_addressable; 371 param.windowb_x_start = 0; 372 param.windowb_y_start = 0; 373 param.windowb_x_end = pipe->stream->timing.h_addressable; 374 param.windowb_y_end = pipe->stream->timing.v_addressable; 375 376 if (crc_window) { 377 param.windowa_x_start = crc_window->windowa_x_start; 378 param.windowa_y_start = crc_window->windowa_y_start; 379 param.windowa_x_end = crc_window->windowa_x_end; 380 param.windowa_y_end = crc_window->windowa_y_end; 381 param.windowb_x_start = crc_window->windowb_x_start; 382 param.windowb_y_start = crc_window->windowb_y_start; 383 param.windowb_x_end = crc_window->windowb_x_end; 384 param.windowb_y_end = crc_window->windowb_y_end; 385 } 386 387 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 388 param.odm_mode = pipe->next_odm_pipe ? 
1:0; 389 390 /* Default to the union of both windows */ 391 param.selection = UNION_WINDOW_A_B; 392 param.continuous_mode = continuous; 393 param.enable = enable; 394 395 tg = pipe->stream_res.tg; 396 397 /* Only call if supported */ 398 if (tg->funcs->configure_crc) 399 return tg->funcs->configure_crc(tg, ¶m); 400 DC_LOG_WARNING("CRC capture not supported."); 401 return false; 402 } 403 404 /** 405 * dc_stream_get_crc() - Get CRC values for the given stream. 406 * @dc: DC object 407 * @stream: The DC stream state of the stream to get CRCs from. 408 * @r_cr: CRC value for the first of the 3 channels stored here. 409 * @g_y: CRC value for the second of the 3 channels stored here. 410 * @b_cb: CRC value for the third of the 3 channels stored here. 411 * 412 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 413 * Return false if stream is not found, or if CRCs are not enabled. 414 */ 415 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 416 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 417 { 418 int i; 419 struct pipe_ctx *pipe; 420 struct timing_generator *tg; 421 422 for (i = 0; i < MAX_PIPES; i++) { 423 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 424 if (pipe->stream == stream) 425 break; 426 } 427 /* Stream not found */ 428 if (i == MAX_PIPES) 429 return false; 430 431 tg = pipe->stream_res.tg; 432 433 if (tg->funcs->get_crc) 434 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 435 DC_LOG_WARNING("CRC capture not supported."); 436 return false; 437 } 438 439 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 440 enum dc_dynamic_expansion option) 441 { 442 /* OPP FMT dyn expansion updates*/ 443 int i = 0; 444 struct pipe_ctx *pipe_ctx; 445 446 for (i = 0; i < MAX_PIPES; i++) { 447 if (dc->current_state->res_ctx.pipe_ctx[i].stream 448 == stream) { 449 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 450 pipe_ctx->stream_res.opp->dyn_expansion = option; 451 
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 452 pipe_ctx->stream_res.opp, 453 COLOR_SPACE_YCBCR601, 454 stream->timing.display_color_depth, 455 stream->signal); 456 } 457 } 458 } 459 460 void dc_stream_set_dither_option(struct dc_stream_state *stream, 461 enum dc_dither_option option) 462 { 463 struct bit_depth_reduction_params params; 464 struct dc_link *link = stream->link; 465 struct pipe_ctx *pipes = NULL; 466 int i; 467 468 for (i = 0; i < MAX_PIPES; i++) { 469 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 470 stream) { 471 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 472 break; 473 } 474 } 475 476 if (!pipes) 477 return; 478 if (option > DITHER_OPTION_MAX) 479 return; 480 481 stream->dither_option = option; 482 483 memset(¶ms, 0, sizeof(params)); 484 resource_build_bit_depth_reduction_params(stream, ¶ms); 485 stream->bit_depth_params = params; 486 487 if (pipes->plane_res.xfm && 488 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 489 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 490 pipes->plane_res.xfm, 491 pipes->plane_res.scl_data.lb_params.depth, 492 &stream->bit_depth_params); 493 } 494 495 pipes->stream_res.opp->funcs-> 496 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 497 } 498 499 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 500 { 501 int i = 0; 502 bool ret = false; 503 struct pipe_ctx *pipes; 504 505 for (i = 0; i < MAX_PIPES; i++) { 506 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 507 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 508 dc->hwss.program_gamut_remap(pipes); 509 ret = true; 510 } 511 } 512 513 return ret; 514 } 515 516 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 517 { 518 int i = 0; 519 bool ret = false; 520 struct pipe_ctx *pipes; 521 522 for (i = 0; i < MAX_PIPES; i++) { 523 if (dc->current_state->res_ctx.pipe_ctx[i].stream 524 == stream) { 525 526 pipes 
= &dc->current_state->res_ctx.pipe_ctx[i]; 527 dc->hwss.program_output_csc(dc, 528 pipes, 529 stream->output_color_space, 530 stream->csc_color_matrix.matrix, 531 pipes->stream_res.opp->inst); 532 ret = true; 533 } 534 } 535 536 return ret; 537 } 538 539 void dc_stream_set_static_screen_params(struct dc *dc, 540 struct dc_stream_state **streams, 541 int num_streams, 542 const struct dc_static_screen_params *params) 543 { 544 int i = 0; 545 int j = 0; 546 struct pipe_ctx *pipes_affected[MAX_PIPES]; 547 int num_pipes_affected = 0; 548 549 for (i = 0; i < num_streams; i++) { 550 struct dc_stream_state *stream = streams[i]; 551 552 for (j = 0; j < MAX_PIPES; j++) { 553 if (dc->current_state->res_ctx.pipe_ctx[j].stream 554 == stream) { 555 pipes_affected[num_pipes_affected++] = 556 &dc->current_state->res_ctx.pipe_ctx[j]; 557 } 558 } 559 } 560 561 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 562 } 563 564 static void dc_destruct(struct dc *dc) 565 { 566 if (dc->current_state) { 567 dc_release_state(dc->current_state); 568 dc->current_state = NULL; 569 } 570 571 destroy_links(dc); 572 573 if (dc->clk_mgr) { 574 dc_destroy_clk_mgr(dc->clk_mgr); 575 dc->clk_mgr = NULL; 576 } 577 578 dc_destroy_resource_pool(dc); 579 580 if (dc->ctx->gpio_service) 581 dal_gpio_service_destroy(&dc->ctx->gpio_service); 582 583 if (dc->ctx->created_bios) 584 dal_bios_parser_destroy(&dc->ctx->dc_bios); 585 586 dc_perf_trace_destroy(&dc->ctx->perf_trace); 587 588 kfree(dc->ctx); 589 dc->ctx = NULL; 590 591 kfree(dc->bw_vbios); 592 dc->bw_vbios = NULL; 593 594 kfree(dc->bw_dceip); 595 dc->bw_dceip = NULL; 596 597 #ifdef CONFIG_DRM_AMD_DC_DCN 598 kfree(dc->dcn_soc); 599 dc->dcn_soc = NULL; 600 601 kfree(dc->dcn_ip); 602 dc->dcn_ip = NULL; 603 604 #endif 605 kfree(dc->vm_helper); 606 dc->vm_helper = NULL; 607 608 } 609 610 static bool dc_construct_ctx(struct dc *dc, 611 const struct dc_init_data *init_params) 612 { 613 struct dc_context *dc_ctx; 614 enum 
dce_version dc_version = DCE_VERSION_UNKNOWN; 615 616 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 617 if (!dc_ctx) 618 return false; 619 620 dc_ctx->cgs_device = init_params->cgs_device; 621 dc_ctx->driver_context = init_params->driver; 622 dc_ctx->dc = dc; 623 dc_ctx->asic_id = init_params->asic_id; 624 dc_ctx->dc_sink_id_count = 0; 625 dc_ctx->dc_stream_id_count = 0; 626 dc_ctx->dce_environment = init_params->dce_environment; 627 628 /* Create logger */ 629 630 dc_version = resource_parse_asic_id(init_params->asic_id); 631 dc_ctx->dce_version = dc_version; 632 633 dc_ctx->perf_trace = dc_perf_trace_create(); 634 if (!dc_ctx->perf_trace) { 635 ASSERT_CRITICAL(false); 636 return false; 637 } 638 639 dc->ctx = dc_ctx; 640 641 return true; 642 } 643 644 static bool dc_construct(struct dc *dc, 645 const struct dc_init_data *init_params) 646 { 647 struct dc_context *dc_ctx; 648 struct bw_calcs_dceip *dc_dceip; 649 struct bw_calcs_vbios *dc_vbios; 650 #ifdef CONFIG_DRM_AMD_DC_DCN 651 struct dcn_soc_bounding_box *dcn_soc; 652 struct dcn_ip_params *dcn_ip; 653 #endif 654 655 dc->config = init_params->flags; 656 657 // Allocate memory for the vm_helper 658 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 659 if (!dc->vm_helper) { 660 dm_error("%s: failed to create dc->vm_helper\n", __func__); 661 goto fail; 662 } 663 664 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 665 666 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 667 if (!dc_dceip) { 668 dm_error("%s: failed to create dceip\n", __func__); 669 goto fail; 670 } 671 672 dc->bw_dceip = dc_dceip; 673 674 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 675 if (!dc_vbios) { 676 dm_error("%s: failed to create vbios\n", __func__); 677 goto fail; 678 } 679 680 dc->bw_vbios = dc_vbios; 681 #ifdef CONFIG_DRM_AMD_DC_DCN 682 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 683 if (!dcn_soc) { 684 dm_error("%s: failed to create dcn_soc\n", __func__); 685 goto fail; 686 } 
687 688 dc->dcn_soc = dcn_soc; 689 690 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 691 if (!dcn_ip) { 692 dm_error("%s: failed to create dcn_ip\n", __func__); 693 goto fail; 694 } 695 696 dc->dcn_ip = dcn_ip; 697 #endif 698 699 if (!dc_construct_ctx(dc, init_params)) { 700 dm_error("%s: failed to create ctx\n", __func__); 701 goto fail; 702 } 703 704 dc_ctx = dc->ctx; 705 706 /* Resource should construct all asic specific resources. 707 * This should be the only place where we need to parse the asic id 708 */ 709 if (init_params->vbios_override) 710 dc_ctx->dc_bios = init_params->vbios_override; 711 else { 712 /* Create BIOS parser */ 713 struct bp_init_data bp_init_data; 714 715 bp_init_data.ctx = dc_ctx; 716 bp_init_data.bios = init_params->asic_id.atombios_base_address; 717 718 dc_ctx->dc_bios = dal_bios_parser_create( 719 &bp_init_data, dc_ctx->dce_version); 720 721 if (!dc_ctx->dc_bios) { 722 ASSERT_CRITICAL(false); 723 goto fail; 724 } 725 726 dc_ctx->created_bios = true; 727 } 728 729 dc->vendor_signature = init_params->vendor_signature; 730 731 /* Create GPIO service */ 732 dc_ctx->gpio_service = dal_gpio_service_create( 733 dc_ctx->dce_version, 734 dc_ctx->dce_environment, 735 dc_ctx); 736 737 if (!dc_ctx->gpio_service) { 738 ASSERT_CRITICAL(false); 739 goto fail; 740 } 741 742 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 743 if (!dc->res_pool) 744 goto fail; 745 746 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 747 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 748 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 749 750 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 751 if (!dc->clk_mgr) 752 goto fail; 753 #ifdef CONFIG_DRM_AMD_DC_DCN 754 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 755 #endif 756 757 if (dc->res_pool->funcs->update_bw_bounding_box) 758 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 
759 760 /* Creation of current_state must occur after dc->dml 761 * is initialized in dc_create_resource_pool because 762 * on creation it copies the contents of dc->dml 763 */ 764 765 dc->current_state = dc_create_state(dc); 766 767 if (!dc->current_state) { 768 dm_error("%s: failed to create validate ctx\n", __func__); 769 goto fail; 770 } 771 772 dc_resource_state_construct(dc, dc->current_state); 773 774 if (!create_links(dc, init_params->num_virtual_links)) 775 goto fail; 776 777 return true; 778 779 fail: 780 return false; 781 } 782 783 static void disable_all_writeback_pipes_for_stream( 784 const struct dc *dc, 785 struct dc_stream_state *stream, 786 struct dc_state *context) 787 { 788 int i; 789 790 for (i = 0; i < stream->num_wb_info; i++) 791 stream->writeback_info[i].wb_enabled = false; 792 } 793 794 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, 795 struct dc_stream_state *stream, bool lock) 796 { 797 int i = 0; 798 799 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 800 if (dc->hwss.interdependent_update_lock) 801 dc->hwss.interdependent_update_lock(dc, context, lock); 802 else { 803 for (i = 0; i < dc->res_pool->pipe_count; i++) { 804 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 805 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 806 807 // Copied conditions that were previously in dce110_apply_ctx_for_surface 808 if (stream == pipe_ctx->stream) { 809 if (!pipe_ctx->top_pipe && 810 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 811 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 812 } 813 } 814 } 815 } 816 817 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 818 { 819 int i, j; 820 struct dc_state *dangling_context = dc_create_state(dc); 821 struct dc_state *current_ctx; 822 823 if (dangling_context == NULL) 824 return; 825 826 dc_resource_state_copy_construct(dc->current_state, 
dangling_context); 827 828 for (i = 0; i < dc->res_pool->pipe_count; i++) { 829 struct dc_stream_state *old_stream = 830 dc->current_state->res_ctx.pipe_ctx[i].stream; 831 bool should_disable = true; 832 833 for (j = 0; j < context->stream_count; j++) { 834 if (old_stream == context->streams[j]) { 835 should_disable = false; 836 break; 837 } 838 } 839 if (should_disable && old_stream) { 840 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 841 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 842 843 if (dc->hwss.apply_ctx_for_surface) { 844 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 845 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 846 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 847 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 848 } 849 if (dc->hwss.program_front_end_for_ctx) { 850 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 851 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 852 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 853 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 854 } 855 } 856 } 857 858 current_ctx = dc->current_state; 859 dc->current_state = dangling_context; 860 dc_release_state(current_ctx); 861 } 862 863 static void disable_vbios_mode_if_required( 864 struct dc *dc, 865 struct dc_state *context) 866 { 867 unsigned int i, j; 868 869 /* check if timing_changed, disable stream*/ 870 for (i = 0; i < dc->res_pool->pipe_count; i++) { 871 struct dc_stream_state *stream = NULL; 872 struct dc_link *link = NULL; 873 struct pipe_ctx *pipe = NULL; 874 875 pipe = &context->res_ctx.pipe_ctx[i]; 876 stream = pipe->stream; 877 if (stream == NULL) 878 continue; 879 880 // only looking for first odm pipe 881 if (pipe->prev_odm_pipe) 882 continue; 883 884 if (stream->link->local_sink && 885 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 886 link = 
stream->link; 887 } 888 889 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 890 unsigned int enc_inst, tg_inst = 0; 891 unsigned int pix_clk_100hz; 892 893 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 894 if (enc_inst != ENGINE_ID_UNKNOWN) { 895 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 896 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 897 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 898 dc->res_pool->stream_enc[j]); 899 break; 900 } 901 } 902 903 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 904 dc->res_pool->dp_clock_source, 905 tg_inst, &pix_clk_100hz); 906 907 if (link->link_status.link_active) { 908 uint32_t requested_pix_clk_100hz = 909 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 910 911 if (pix_clk_100hz != requested_pix_clk_100hz) { 912 core_link_disable_stream(pipe); 913 pipe->stream->dpms_off = false; 914 } 915 } 916 } 917 } 918 } 919 } 920 921 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 922 { 923 int i; 924 PERF_TRACE(); 925 for (i = 0; i < MAX_PIPES; i++) { 926 int count = 0; 927 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 928 929 if (!pipe->plane_state) 930 continue; 931 932 /* Timeout 100 ms */ 933 while (count < 100000) { 934 /* Must set to false to start with, due to OR in update function */ 935 pipe->plane_state->status.is_flip_pending = false; 936 dc->hwss.update_pending_status(pipe); 937 if (!pipe->plane_state->status.is_flip_pending) 938 break; 939 udelay(1); 940 count++; 941 } 942 ASSERT(!pipe->plane_state->status.is_flip_pending); 943 } 944 PERF_TRACE(); 945 } 946 947 /******************************************************************************* 948 * Public functions 949 ******************************************************************************/ 950 951 struct dc *dc_create(const struct dc_init_data *init_params) 952 { 953 struct dc *dc = kzalloc(sizeof(*dc), 
GFP_KERNEL); 954 unsigned int full_pipe_count; 955 956 if (!dc) 957 return NULL; 958 959 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 960 if (!dc_construct_ctx(dc, init_params)) 961 goto destruct_dc; 962 } else { 963 if (!dc_construct(dc, init_params)) 964 goto destruct_dc; 965 966 full_pipe_count = dc->res_pool->pipe_count; 967 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 968 full_pipe_count--; 969 dc->caps.max_streams = min( 970 full_pipe_count, 971 dc->res_pool->stream_enc_count); 972 973 dc->optimize_seamless_boot_streams = 0; 974 dc->caps.max_links = dc->link_count; 975 dc->caps.max_audios = dc->res_pool->audio_count; 976 dc->caps.linear_pitch_alignment = 64; 977 978 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 979 980 if (dc->res_pool->dmcu != NULL) 981 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 982 } 983 984 /* Populate versioning information */ 985 dc->versions.dc_ver = DC_VER; 986 987 dc->build_id = DC_BUILD_ID; 988 989 DC_LOG_DC("Display Core initialized\n"); 990 991 992 993 return dc; 994 995 destruct_dc: 996 dc_destruct(dc); 997 kfree(dc); 998 return NULL; 999 } 1000 1001 static void detect_edp_presence(struct dc *dc) 1002 { 1003 struct dc_link *edp_link = get_edp_link(dc); 1004 bool edp_sink_present = true; 1005 1006 if (!edp_link) 1007 return; 1008 1009 if (dc->config.edp_not_connected) { 1010 edp_sink_present = false; 1011 } else { 1012 enum dc_connection_type type; 1013 dc_link_detect_sink(edp_link, &type); 1014 if (type == dc_connection_none) 1015 edp_sink_present = false; 1016 } 1017 1018 edp_link->edp_sink_present = edp_sink_present; 1019 } 1020 1021 void dc_hardware_init(struct dc *dc) 1022 { 1023 1024 detect_edp_presence(dc); 1025 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1026 dc->hwss.init_hw(dc); 1027 } 1028 1029 void dc_init_callbacks(struct dc *dc, 1030 const struct dc_callback_init *init_params) 1031 { 1032 #ifdef CONFIG_DRM_AMD_DC_HDCP 1033 dc->ctx->cp_psp = 
init_params->cp_psp; 1034 #endif 1035 } 1036 1037 void dc_deinit_callbacks(struct dc *dc) 1038 { 1039 #ifdef CONFIG_DRM_AMD_DC_HDCP 1040 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1041 #endif 1042 } 1043 1044 void dc_destroy(struct dc **dc) 1045 { 1046 dc_destruct(*dc); 1047 kfree(*dc); 1048 *dc = NULL; 1049 } 1050 1051 static void enable_timing_multisync( 1052 struct dc *dc, 1053 struct dc_state *ctx) 1054 { 1055 int i = 0, multisync_count = 0; 1056 int pipe_count = dc->res_pool->pipe_count; 1057 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1058 1059 for (i = 0; i < pipe_count; i++) { 1060 if (!ctx->res_ctx.pipe_ctx[i].stream || 1061 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1062 continue; 1063 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1064 continue; 1065 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1066 multisync_count++; 1067 } 1068 1069 if (multisync_count > 0) { 1070 dc->hwss.enable_per_frame_crtc_position_reset( 1071 dc, multisync_count, multisync_pipes); 1072 } 1073 } 1074 1075 static void program_timing_sync( 1076 struct dc *dc, 1077 struct dc_state *ctx) 1078 { 1079 int i, j, k; 1080 int group_index = 0; 1081 int num_group = 0; 1082 int pipe_count = dc->res_pool->pipe_count; 1083 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1084 1085 for (i = 0; i < pipe_count; i++) { 1086 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe) 1087 continue; 1088 1089 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1090 } 1091 1092 for (i = 0; i < pipe_count; i++) { 1093 int group_size = 1; 1094 struct pipe_ctx *pipe_set[MAX_PIPES]; 1095 1096 if (!unsynced_pipes[i]) 1097 continue; 1098 1099 pipe_set[0] = unsynced_pipes[i]; 1100 unsynced_pipes[i] = NULL; 1101 1102 /* Add tg to the set, search rest of the tg's for ones with 1103 * same timing, add all tgs with same timing to the group 1104 */ 1105 for (j = i + 1; j < 
pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			/* Pull every remaining pipe whose stream timing can be
			 * synchronized with the group seed into pipe_set[].
			 */
			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			/* Prefer the OPP's DPG blank status when the hook exists;
			 * otherwise fall back to the timing generator's status.
			 */
			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}


		/* Record group id/size and master flag in each stream's status. */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}
		/* remove any other unblanked pipes as they have already been synced */
		/* NOTE(review): this loop starts at j + 1, relying on j keeping its
		 * value from the master-search loop above — confirm this is the
		 * intended starting index rather than 1.
		 */
		for (j = j + 1; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				/* Drop the unblanked pipe by swapping in the last
				 * element; re-examine the swapped-in slot (j--).
				 */
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		/* A group of one needs no hardware synchronization. */
		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
		num_group++;
	}
}

/*
 * context_changed() - Return true if the proposed context differs from the
 * currently-committed state (different stream count, or any stream pointer
 * at the same index differs).
 */
static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

/*
 * dc_validate_seamless_boot_timing() - Check whether the timing the VBIOS/GOP
 * already programmed on hardware matches @crtc_timing, so the driver can take
 * over the display without re-programming it (seamless boot).
 *
 * Returns true only when an enabled DIG front end is found, the active timing
 * generator's hardware timing matches @crtc_timing field-for-field, and (for
 * DP) the pixel clock and pixel format also match.
 */
bool dc_validate_seamless_boot_timing(const struct dc *dc,
		const struct dc_sink *sink,
		struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	// Seamless port only support single DP and EDP so far
	if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
		sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	/* Find the stream encoder driving this DIG and the OTG it sources. */
	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	/* Compare every timing field against what hardware reports. */
	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* For DP, the pixel clock and encoder pixel format must match too. */
	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	/* NOTE(review): seamless boot is rejected outright when the receiver
	 * advertises VSC SDP colorimetry support — presumably because the
	 * driver would need to send a VSC SDP the firmware did not; confirm.
	 */
	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	return true;
}

/*
 * dc_enable_stereo() - Invoke the hw sequencer's setup_stereo hook on every
 * pipe whose stream appears in @streams. Uses @context when non-NULL,
 * otherwise dc->current_state.
 */
void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for
(i = 0; i < MAX_PIPES; i++) {
		/* Fall back to the committed state when no explicit context given. */
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0 ; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

/*
 * dc_trigger_sync() - Kick off timing synchronization (multisync and
 * timing-sync grouping) when more than one stream is active and the
 * debug override has not disabled it.
 */
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

/*
 * get_stream_mask() - Build a bitmask with bit i set for every pipe in
 * @context that has a stream attached.
 *
 * NOTE(review): the mask is accumulated in an unsigned int but returned as
 * uint8_t — this silently truncates if pipe_count ever exceeds 8; confirm
 * the maximum pipe count for all supported ASICs.
 */
static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

/*
 * Applies given context to HW and copy it into current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Make sure we exit low-power/idle optimizations before programming HW. */
	dc_allow_idle_optimizations(dc, false);
#endif

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	/* First commit after boot: transition out of the VBIOS-lit mode. */
	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	for (i = 0; i < context->stream_count; i++)
		if (context->streams[i]->apply_seamless_boot_optimization)
			dc->optimize_seamless_boot_streams++;

	/* Skip the bandwidth bump-up when every stream is a seamless-boot
	 * stream (hardware is already running with valid clocks).
	 */
	if (context->stream_count > dc->optimize_seamless_boot_streams ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	dc_trigger_sync(dc, context);

	/* Program all planes within new context*/
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > dc->optimize_seamless_boot_streams ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	/* Tell DMUB firmware which pipes carry streams, if that changed. */
	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Swap in the new context; caller still owns its own reference. */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}

/*
 * dc_commit_state() - Validate-free commit entry point: logs the streams and
 * applies @context to hardware if it differs from the current state.
 *
 * Returns true on success (DC_OK from the no-check commit).
 */
bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		/* NOTE(review): returns enum value DC_OK from a bool function;
		 * relies on DC_OK being nonzero so this reads as "true".
		 */
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * dc_acquire_release_mpc_3dlut() - Acquire (@acquire true) or release a
 * post-blend MPC 3D LUT + shaper pair for @stream via the resource pool
 * hooks. For acquire, the MPCC id is taken from the HUBP instance of the
 * pipe currently driving @stream.
 *
 * Returns true if the pool hook succeeded.
 */
bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/*find pipe idx for the given stream*/
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true;/*for release pipe_idx is not required*/

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}
#endif
/*
 * is_flip_pending_in_pipes() - Return true if any pipe with a plane in
 * @context still has a page flip outstanding in hardware.
 */
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}
/*
 * dc_post_update_surfaces_to_stream() - Deferred clean-up after surface
 * updates: disables planes that are no longer used and lowers bandwidth/
 * clocks, once no flips are pending. No-op unless an optimize was flagged
 * and no seamless-boot streams remain.
 */
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
		return;

	post_surface_trace(dc);

	/* Do not lower clocks while a flip is still in flight. */
	if (is_flip_pending_in_pipes(dc, context))
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
			context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}

/* init_state() - Seed a freshly-allocated dc_state from the base DC data. */
static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
}

/*
 * dc_create_state() - Allocate and initialize a new, empty dc_state with a
 * refcount of 1. Returns NULL on allocation failure. Caller releases with
 * dc_release_state().
 */
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
			GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

	kref_init(&context->refcount);

	return context;
}

/*
 * dc_copy_state() - Deep-ish copy of @src_ctx: the struct is memcpy'd,
 * intra-context pipe pointers are re-targeted into the new copy, and every
 * stream/plane referenced gets an extra reference. The copy starts with its
 * own refcount of 1. Returns NULL on allocation failure.
 */
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	/* Fix up pipe links so they point into the new context, not the old. */
	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];

	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

/* dc_retain_state() - Take an additional reference on @context. */
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

/* kref release callback: destruct the resource state and free the context. */
static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kvfree(context);
}

/* dc_release_state() - Drop a reference; frees the context on last put. */
void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

/*
 * dc_set_generic_gpio_for_stereo() - Route (or stop routing) the stereo sync
 * signal through the generic GPIO mux. Returns true when the mux was
 * configured and closed successfully; false on any failure (allocation,
 * missing pin, mux creation, or GPIO errors).
 */
bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	/* All-ones mask/offset means the generic pin does not exist. */
	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	} else {
		generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	}

	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	/* Either way, close and destroy the mux and free the config. */
	if (gpio_result == GPIO_RESULT_OK) {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return true;
	} else {
		dal_gpio_close(generic);
		dal_gpio_destroy_generic_mux(&generic);
		kfree(config);
		return false;
	}
}

/*
 * is_surface_in_context() - Return true if @plane_state is attached to any
 * pipe in @context.
 */
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

/*
 * get_plane_info_update_type() - Compare the incoming plane_info against the
 * surface's current values, set the matching update_flags bits, and return
 * the minimum update type (FAST/MED/FULL) the differences require.
 */
static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}


	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

/*
 * get_scaling_info_update_type() - Classify a scaling-info change as
 * FAST/MED/FULL based on which rects changed and whether the change needs
 * more bandwidth or a clock change. Also sets the relevant update_flags.
 */
static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling
			) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if
(u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	/* Clock/bandwidth/scaling changes require a FULL update; a pure
	 * position move only needs MED.
	 */
	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

/*
 * det_surface_update() - Determine the overall update type for one surface
 * update by combining plane-info and scaling classifications with the other
 * per-surface flags (flip address, transfer funcs, CSC, gamma, HDR mult).
 * A surface not present in the current context, or one with
 * force_full_update set, is always a FULL update with all flags raised.
 */
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	/* Re-set: the raw reset above cleared the earlier addr_update bit. */
	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		/* Pick the most specific known pixel format to decide whether
		 * this surface type uses a LUT at all.
		 */
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/*
 * check_update_surfaces_for_stream() - Compute the overall update type for a
 * set of surface updates plus an optional stream update. Any stream-level
 * flag forces FULL, as does a surface-count mismatch with the stream status.
 */
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* While idle optimizations are active any update must be FULL. */
	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

#endif
	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		/* NOTE(review): out_csc is set after the raw != 0 check, so by
		 * itself it does not force a FULL update — confirm intended.
		 */
		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	/* Start from a clean slate of flags before classification. */
	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		/* On FULL, raise every flag but preserve dsc_changed as computed. */
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fallback to mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}

/*
 * stream_get_status() - Find the dc_stream_status entry for @stream inside
 * @ctx, or NULL if the stream is not part of that context.
 */
static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

/* Minimum update type at which surface-update tracing is emitted. */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

/*
 * copy_surface_update_to_plane() - Apply the non-NULL fields of @srf_update
 * onto @surface. Pointer fields are copied by value; flip timing statistics
 * are maintained in surface->time as a ring of DC_PLANE_UPDATE_TIMES_MAX
 * samples.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Track time between flips in a small ring buffer. */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	/* Gamma/transfer funcs: copy contents only when the update supplies a
	 * different object than the one the surface already holds.
	 */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
		memcpy(&stream->out_transfer_func->tf_pts,
			&update->out_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	/* DSC config change: trial-apply to the stream, validate bandwidth in a
	 * temporary context, and roll back (clearing update->dsc_config so
	 * callers see the change was rejected) if validation fails.
	 */
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		/* DSC is considered enabled only when both slice counts are set */
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporary context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				/* Rejected: restore previous DSC state */
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}

/*
 * commit_planes_do_stream_update() - Program stream-level updates into HW for
 * every top-level (non-mpc-split, non-odm-secondary) pipe driving @stream.
 * Fast updates stop after infoframe/test-pattern/fmt programming; full
 * updates additionally handle DSC, DPMS and ABM.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;
	bool should_program_abm;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* Only act on the top pipe of each pipe tree for this stream */
		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			/* Rebuild + resend infoframes when any infopacket changed
			 * (static HDR metadata only when not using dynamic metadata)
			 */
			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			/* Dynamic HDR metadata path: only when a dmdata address exists */
			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			/* Dither change: reprogram OPP FMT on this pipe and on every
			 * chained ODM pipe so all segments match.
			 */
			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
					&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			if (stream_update->pending_test_pattern) {
				dc_link_dp_set_test_pattern(stream->link,
					stream->test_pattern.type,
					stream->test_pattern.color_space,
					stream->test_pattern.p_link_settings,
					stream->test_pattern.p_custom_pattern,
					stream->test_pattern.cust_pattern_size);
			}

			/* Full fe update*/
			/* Everything below requires a full front-end update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->hwss.optimize_bandwidth(dc, dc->current_state);
				} else {
					/* skip bandwidth prep while seamless-boot
					 * streams still hold clocks high
					 */
					if (dc->optimize_seamless_boot_streams == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}

/*
 * commit_planes_for_stream() - Apply surface and stream updates for @stream
 * to hardware under the appropriate pipe/interdependent locks.  The heavy
 * lifting differs for FAST (flip-only) vs FULL update types.
 */
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;
			dc->optimize_seamless_boot_streams--;

			/* last seamless-boot stream flipped: schedule optimization */
			if (dc->optimize_seamless_boot_streams == 0)
				dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_allow_idle_optimizations(dc, false);

#endif
		if (dc->optimize_seamless_boot_streams == 0)
			dc->hwss.prepare_bandwidth(dc, context);

		context_clock_trace(dc, context);
	}

	/* Find the top pipe (not an mpc-split bottom pipe, not an odm secondary)
	 * for this stream; it is the lock anchor used below.
	 */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			top_pipe_to_program = pipe_ctx;
		}
	}

	/* DSC change on a non-fast update: take the OTG double-buffer lock,
	 * via DMUB firmware when the link requires it, otherwise directly.
	 */
	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
							true,
							&hw_locks,
							&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
						top_pipe_to_program->stream_res.tg);
		}

	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);
	else
		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

	// Stream updates
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * just return after program blank.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		/* release the lock taken above, then finish and bail out */
		if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
			dc->hwss.interdependent_update_lock(dc, context, false);
		else
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

		dc->hwss.post_unlock_program_front_end(dc, context);
		return;
	}

	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;
			/*set logical flag for lock/unlock use*/
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
				if (!pipe_ctx->plane_state)
					continue;
				if (pipe_ctx->plane_state != plane_state)
					continue;
				plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!plane_state->flip_immediate && dc->debug.enable_tri_buf) {
					/*triple buffer for VUpdate only*/
					plane_state->triplebuffer_flips = true;
				}
			}
		}
	}

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
#ifdef CONFIG_DRM_AMD_DC_DCN
		/* Debug aid: cross-check programmed HUBP registers against the
		 * DML-computed rq/dlg/ttu register values.
		 */
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i];
				if (cur_pipe.stream == NULL)
					continue;

				cur_pipe.plane_res.hubp->funcs->validate_dml_output(
						cur_pipe.plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
#endif
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (pipe_ctx->stream != stream)
						continue;

					if (pipe_ctx->plane_state != plane_state)
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							plane_state->flip_immediate);
				}
			}
		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;
				/*program triple buffer after lock based on flip type*/
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/*only enable triplebuffer for fast_update*/
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, plane_state->triplebuffer_flips);
				}
				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	/* Release the locks taken before programming */
	if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);
	else
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);

	/* DSC change: wait VACTIVE -> VBLANK -> VACTIVE so the double-buffered
	 * registers latch, then drop the OTG double-buffer lock (via DMUB when
	 * the link requires firmware-managed locking).
	 */
	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
				top_pipe_to_program->stream_res.tg,
				CRTC_STATE_VACTIVE);

			if (stream && should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
							false,
							&hw_locks,
							&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
						top_pipe_to_program->stream_res.tg);
		}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || pipe_ctx->stream != stream ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}

/*
 * dc_commit_updates_for_stream() - Public entry point to apply surface and
 * stream updates for one stream.  Classifies the update (FAST/MED/FULL),
 * builds and validates a new dc_state for FULL updates, commits to HW, and
 * swaps in the new state on success.
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	/* dc_ctx feeds the DC_ERROR() macro and the DCE/DCN trace choice below */
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* Planes that changed need a full reprogram, not a delta update */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	}


	/* Fold the per-surface updates into the plane states and, for MED+
	 * updates, rebuild scaling parameters on the affected pipes.
	 */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	/* NOTE(review): trace macro references pipe_ctx/i; presumably expands
	 * to a no-op or uses its own iteration when tracing is disabled —
	 * confirm against dc_trace.h.
	 */
	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/* update current_state: swap in the new context and drop the old one */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}
	/*let's use current_state to update watermark etc*/
	if (update_type >= UPDATE_TYPE_FULL) {
		dc_post_update_surfaces_to_stream(dc);

		if (dc_ctx->dce_version >= DCE_VERSION_MAX)
			TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
		else
			TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}

	return;

}

/* Number of streams in the currently committed state */
uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

/* Return stream @i of the current state, or NULL if out of range */
struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

/* Find the first stream in the current state driven by @link, or NULL */
struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
{
	uint8_t i;
	struct dc_context *ctx = link->ctx;

	for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
		if (ctx->dc->current_state->streams[i]->link == link)
			return ctx->dc->current_state->streams[i];
	}

	return NULL;
}

/* Translate an IRQ src/ext id pair to a dc_irq_source via the irq service */
enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

/*
 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
 */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{

	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

/* Acknowledge (clear) a pending interrupt source */
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

/* Power down display HW at boot, skipped on virtual (no-HW) environments */
void dc_power_down_on_boot(struct dc *dc)
{
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
			dc->hwss.power_down_on_boot)
		dc->hwss.power_down_on_boot(dc);
}

/*
 * dc_set_power_state() - Handle ACPI power transitions.
 * D0: reconstruct resource state and re-init HW.
 * Any other state: tear down and zero the current context, preserving only
 * its refcount and display_mode_lib so resume starts from a clean slate.
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;
	struct display_mode_lib *dml;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		/* wait for DMUB-managed PHY init before touching HW */
		if (dc->ctx->dmub_srv)
			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);

		dc->hwss.init_hw(dc);

		if (dc->hwss.init_sys_ctx != NULL &&
			dc->vm_pa_config.valid) {
			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
		}

		break;
	default:
		ASSERT(dc->current_state->stream_count == 0);
		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		/* dml copy is heap-allocated: too large for the stack */
		dml = kzalloc(sizeof(struct display_mode_lib),
				GFP_KERNEL);

		ASSERT(dml);
		if (!dml)
			return;

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		/* Preserve display mode lib */
		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));

		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;
		dc->current_state->bw_ctx.dml = *dml;

		kfree(dml);

		break;
	}
}

/* Resume all links after power transition */
void dc_resume(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

/* Query DMCU firmware init state; false when no DMCU is present */
bool dc_is_dmcu_initialized(struct dc *dc)
{
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (dmcu)
		return dmcu->funcs->is_dmcu_initialized(dmcu);
	return false;
}

/* Submit an I2C command on the DDC line of link @link_index */
bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{

	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;
	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

/* Submit an I2C command on the OEM (board-level) DDC line */
bool dc_submit_i2c_oem(
		struct dc *dc,
		struct i2c_command *cmd)
{
	struct ddc_service *ddc = dc->res_pool->oem_device;
	return dce_i2c_submit_command(
		dc->res_pool,
		ddc->ddc_pin,
		cmd);
}

/* Attach @sink to @dc_link's remote sink array, taking a reference */
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if
	(dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

/*
 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
 *
 * EDID length is in bytes
 */
struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

/*
 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
 *
 * Note that this just removes the struct dc_sink - it doesn't
 * program hardware or alter other members of dc_link
 */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}

/* Export the clock values of @state's DCN bandwidth context into @info */
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
	info->displayClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
	info->engineClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
	info->memoryClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
	info->maxSupportedDppClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
	info->dppClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
	info->socClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
	info->dcfClockDeepSleep			= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
	info->fClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
	info->phyClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
/* Forward a clock set request to the hw sequencer, if supported */
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
{
	if (dc->hwss.set_clock)
		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
	return DC_ERROR_UNEXPECTED;
}
/* Query a clock config from the hw sequencer, if supported */
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
{
	if (dc->hwss.get_clock)
		dc->hwss.get_clock(dc, clock_type, clock_cfg);
}

/* enable/disable eDP PSR without specify stream for eDP */
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
	int i;

	/* NOTE(review): returns from inside the loop on the first PSR-capable
	 * link whose allow_active state changes — later streams are not
	 * visited in that case.
	 */
	for (i = 0; i < dc->current_state->stream_count ; i++) {
		struct dc_link *link;
		struct
		dc_stream_state *stream = dc->current_state->streams[i];

		link = stream->link;
		if (!link)
			continue;

		if (link->psr_settings.psr_feature_enabled) {
			if (enable && !link->psr_settings.psr_allow_active)
				return dc_link_set_psr_allow_active(link, true, false, false);
			else if (!enable && link->psr_settings.psr_allow_active)
				return dc_link_set_psr_allow_active(link, false, true, false);
		}
	}

	return true;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

/* Toggle idle power optimizations via the hw sequencer; the cached flag is
 * only updated when the HW call reports success.
 */
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
	if (dc->debug.disable_idle_power_optimizations)
		return;

	if (allow == dc->idle_optimizations_allowed)
		return;

	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
		dc->idle_optimizations_allowed = allow;
}

/*
 * blank all streams, and set min and max memory clock to
 * lowest and highest DPM level, respectively
 */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);

	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
}

/*
 * set min memory clock to the min required for current mode,
 * max to maxDPM, and unblank streams
 */
void dc_lock_memory_clock_frequency(struct dc *dc)
{
	unsigned int i;

	dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	for (i = 0; i < MAX_PIPES; i++)
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			core_link_enable_stream(dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
}

/* True when the plane (and cursor) fit in MALL per the hw sequencer check */
bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
		struct dc_cursor_attributes *cursor_attr)
{
	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
		return true;
	return false;
}

/* cleanup on driver unload */
void dc_hardware_release(struct dc *dc)
{
	if (dc->hwss.hardware_release)
		dc->hwss.hardware_release(dc);
}
#endif