/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#define DC_LOGGER \
	dc->ctx->logger


/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
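			/* Physical links discovered from the BIOS connector
			 * table are stored first in dc->links[]; virtual
			 * links created below are appended after them.
			 */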
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc: Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc: Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	if (dc->ctx->logger)
		dal_logger_destroy(&dc->ctx->logger);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dal_logger *logger;
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */
	logger = dal_logger_create(dc_ctx, init_params->log_mask);

	if (!logger) {
		/* can *not* call logger. call base driver 'print error' */
		dm_error("%s: failed to create Logger!\n", __func__);
		goto fail;
	}
	dc_ctx->logger = logger;
	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->config = init_params->flags;

	DC_LOG_DC("Display Core initialized\n");


	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
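	/* Walk every pipe in the given context (or the current state when
	 * context is NULL) and re-run stereo setup for any pipe whose stream
	 * appears in the streams[] list.
	 */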
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0 ; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	dc->hwss.set_bandwidth(dc, context, false);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context */
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	/* pplib is notified if disp_num changed */
	dc->hwss.set_bandwidth(dc, context, true);

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(stream,
				dc->ctx->logger,
				LOG_DC);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
#else
	dc->hwss.set_bandwidth(dc, context, true);
#endif
	return true;
}

/*
 * TODO this whole function needs to go
 *
 * dc_surface_update is needlessly complex. See if we can just replace this
 * with a dc_plane_state and follow the atomic model a bit more closely here.
 */
bool dc_commit_planes_to_stream(
		struct dc *dc,
		struct dc_plane_state **plane_states,
		uint8_t new_plane_count,
		struct dc_stream_state *dc_stream,
		struct dc_state *state)
{
	/* no need to dynamically allocate this. it's pretty small */
	struct dc_surface_update updates[MAX_SURFACES];
	struct dc_flip_addrs *flip_addr;
	struct dc_plane_info *plane_info;
	struct dc_scaling_info *scaling_info;
	int i;
	struct dc_stream_update *stream_update =
			kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);

	if (!stream_update) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
			    GFP_KERNEL);
	plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
			     GFP_KERNEL);
	scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
			       GFP_KERNEL);

	if (!flip_addr || !plane_info || !scaling_info) {
		kfree(flip_addr);
		kfree(plane_info);
		kfree(scaling_info);
		kfree(stream_update);
		return false;
	}

	memset(updates, 0, sizeof(updates));

	stream_update->src = dc_stream->src;
	stream_update->dst = dc_stream->dst;
	stream_update->out_transfer_func = dc_stream->out_transfer_func;

	for (i = 0; i < new_plane_count; i++) {
		updates[i].surface = plane_states[i];
		updates[i].gamma =
			(struct dc_gamma *)plane_states[i]->gamma_correction;
		updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
		flip_addr[i].address = plane_states[i]->address;
		flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
		plane_info[i].color_space = plane_states[i]->color_space;
		plane_info[i].input_tf = plane_states[i]->input_tf;
		plane_info[i].format = plane_states[i]->format;
		plane_info[i].plane_size = plane_states[i]->plane_size;
		plane_info[i].rotation = plane_states[i]->rotation;
		plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
		plane_info[i].stereo_format = plane_states[i]->stereo_format;
		plane_info[i].tiling_info = plane_states[i]->tiling_info;
		plane_info[i].visible = plane_states[i]->visible;
		plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
		plane_info[i].dcc = plane_states[i]->dcc;
		scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
		scaling_info[i].src_rect = plane_states[i]->src_rect;
		scaling_info[i].dst_rect = plane_states[i]->dst_rect;
		scaling_info[i].clip_rect = plane_states[i]->clip_rect;

		updates[i].flip_addr = &flip_addr[i];
		updates[i].plane_info = &plane_info[i];
		updates[i].scaling_info = &scaling_info[i];
	}

	dc_commit_updates_for_stream(
			dc,
			updates,
			new_plane_count,
			dc_stream, stream_update, plane_states, state);

	kfree(flip_addr);
	kfree(plane_info);
	kfree(scaling_info);
	kfree(stream_update);
	return true;
}

struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;
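	/* A plane is considered "in context" when any pipe in the given
	 * context currently points at it; planes not yet in the context are
	 * treated as new and force a full update in det_surface_update().
	 */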

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space)
		update_flags->bits.color_space_change = 1;

	if (u->plane_info->input_tf != u->surface->input_tf)
		update_flags->bits.input_tf_change = 1;

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
		update_flags->bits.horizontal_mirror_change = 1;

	if (u->plane_info->rotation != u->surface->rotation)
		update_flags->bits.rotation_change = 1;

	if (u->plane_info->format != u->surface->format)
		update_flags->bits.pixel_format_change = 1;

	if (u->plane_info->stereo_format != u->surface->stereo_format)
		update_flags->bits.stereo_format_change = 1;

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
		update_flags->bits.per_pixel_alpha_change = 1;

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;

	if (u->gamma && dce_use_lut(u->plane_info->format))
		update_flags->bits.gamma_change = 1;

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		/* todo: below are HW dependent, we should add a hook to
		 * DCE/N resource and validate there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
	}

	if (update_flags->bits.rotation_change
			|| update_flags->bits.stereo_format_change
			|| update_flags->bits.pixel_format_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.bpp_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.output_tf_change)
		return UPDATE_TYPE_FULL;

	return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; // Reset all flags

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (update_flags->bits.in_transfer_func_change
			|| update_flags->bits.input_csc_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.bits.full_update = 1;

	return type;
}

static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;


static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * Just return after programming the front end.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Full fe update */
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
				continue;

			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);

			if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
						pipe_ctx->stream_res.abm, stream->abm_level);
			}
		}
	}

	if (update_type == UPDATE_TYPE_FULL)
		context_timing_trace(dc, &context->res_ctx);

	/* Lock the top pipe while updating plane addrs, since freesync requires
	 * plane addr update event triggers to be synchronized.
	 * top_pipe_to_program is expected to never be NULL
	 */
	if (update_type == UPDATE_TYPE_FAST) {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;

			if (stream_update->hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
}

void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
			dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}


	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address. Which is why we are updating the address here
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;

		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

	}
	/* let's use current_state to update watermark etc */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{

	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:

		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}

}

void dc_resume(struct dc *dc)
{

	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{

	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}

static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat device as no EDID device if EDID
	 * parsing fails
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}