1 /* 2 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 #ifndef __AMDGPU_DM_H__ 27 #define __AMDGPU_DM_H__ 28 29 #include <drm/display/drm_dp_mst_helper.h> 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_connector.h> 32 #include <drm/drm_crtc.h> 33 #include <drm/drm_plane.h> 34 #include "link_service_types.h" 35 36 /* 37 * This file contains the definition for amdgpu_display_manager 38 * and its API for amdgpu driver's use. 39 * This component provides all the display related functionality 40 * and this is the only component that calls DAL API. 41 * The API contained here intended for amdgpu driver use. 
 * The API that is called directly from KMS framework is located
 * in amdgpu_dm_kms.h file
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
enum set_config_status;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

/**
 * struct common_irq_params - Per-IRQ-source context handed to DM IRQ handlers
 * @adev: amdgpu device this IRQ source belongs to
 * @irq_src: the DC IRQ source this parameter block is registered for
 * @previous_timestamp: timestamp of the previously serviced interrupt of
 *                      this source
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

/*
 * Callback type invoked to handle a notification received from the DMUB
 * (see amdgpu_display_manager.dmub_callback).
 */
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
	 * ready event when we're already handling mst message ready event
	 */
	bool is_handling_mst_msg_rdy_event;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 * displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 * driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 * transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate if callback is offload.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
	struct hdcp_workqueue *hdcp_workqueue;

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @secure_display_ctxs:
	 *
	 * Store the ROI information and the work_struct to command dmub and psp for
	 * all crtcs.
	 */
	struct secure_display_context *secure_display_ctxs;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * quirk for hpd discon while aux is on-going.
	 * occurred on certain intel platform
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;
};

/* Force state for the DSC clock, set via debugfs/connector properties. */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

/*
 * Per-connector DSC overrides. A value of 0 in the numeric fields means
 * "no override requested" (see dsc_force_enable for the enable state).
 */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

/*
 * Bit flags tracking how far MST probing/payload programming has progressed
 * for a connector (stored in amdgpu_dm_connector.mst_status; manipulated
 * with amdgpu_dm_set_mst_status()).
 */
enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;
	int bl_idx;

	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *mst_output_port;
	struct amdgpu_dm_connector *mst_root;
	struct drm_dp_aux *dsc_aux;
	struct mutex handle_mst_msg_ready;

	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of mst */
	uint8_t mst_status;

	/* Automated testing */
	bool timing_changed;
	struct dc_crtc_timing *timing_requested;

	/* Adaptive Sync */
	bool pack_sdp_v1_3;
	enum adaptive_sync_type as_type;
	struct amdgpu_hdmi_vsdb_info vsdb_info;
};

/**
 * amdgpu_dm_set_mst_status - set or clear MST progress flags
 * @status: pointer to the connector's mst_status bitmask
 * @flags: &enum mst_progress_status bits to modify
 * @set: true to set the given bits, false to clear them
 */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

/* DM-private plane state wrapping the DRM plane state plus the DC state. */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

/* DM-private CRTC state wrapping the DRM CRTC state plus the DC stream. */
struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

/* Global DM atomic state, tracked through &amdgpu_display_manager.atomic_obj. */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

/* DM-private connector state wrapping the DRM connector state. */
struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
	bool update_hdcp;
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
					     struct aux_payload *payload, enum aux_return_code_type *operation_result);

int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
					   struct set_config_cmd_payload *payload, enum set_config_status *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */