/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

/*
 * This file contains the definition of amdgpu_display_manager
 * and its API for the amdgpu driver's use.
 * This component provides all the display related functionality
 * and is the only component that calls the DAL API.
 * The API contained here is intended for amdgpu driver use only.
 * The API that is called directly from the KMS framework is located
 * in the amdgpu_dm_kms.h file.
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
enum set_config_status;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};
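
/*
 * Illustrative sketch (not part of this header's API): a DMUB notification
 * handler matches dmub_notify_interrupt_callback_t and is stored in the
 * dmub_callback[] table of struct amdgpu_display_manager below; when the
 * corresponding dmub_thread_offload[] entry is true, it runs from a
 * struct dmub_hpd_work item instead of IRQ context. For example:
 *
 *	static void example_dmub_callback(struct amdgpu_device *adev,
 *					  struct dmub_notification *notify)
 *	{
 *		handle_example_notification(adev, notify);
 *	}
 *
 * example_dmub_callback() and handle_example_notification() are hypothetical
 * names used only for illustration.
 */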

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describes the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
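
/*
 * Illustrative sketch (assumed linear mapping, not defined in this header):
 * a user-space brightness level in the 0-255 range is typically rescaled
 * into the range reported by @min_input_signal/@max_input_signal before it
 * is applied, roughly as follows:
 *
 *	static u32 example_rescale_brightness(const struct amdgpu_dm_backlight_caps *caps,
 *					      u32 user_level)
 *	{
 *		u32 min = caps->min_input_signal;
 *		u32 max = caps->max_input_signal;
 *
 *		return min + DIV_ROUND_CLOSEST((max - min) * user_level, 255);
 *	}
 *
 * example_rescale_brightness() is a hypothetical helper; the real conversion
 * is implemented in amdgpu_dm.c.
 */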

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate whether the callback should be offloaded to a
	 * worker thread.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is an n*m table consisting of n IRQ sources and m handlers per
	 * IRQ source. Low priority IRQ handlers are deferred to a workqueue
	 * to be processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @secure_display_ctxs:
	 *
	 * Store the ROI information and the work_struct to command dmub and
	 * psp for all crtcs.
	 */
	struct secure_display_context *secure_display_ctxs;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect arriving while an AUX transfer is
	 * ongoing, observed on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;
};
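
/*
 * Illustrative sketch (assumed indexing scheme, not defined in this header):
 * the per-source IRQ parameter arrays above are sized by the span of their
 * DC IRQ source enums, so a handler typically looks up its parameters by
 * offsetting the source from the first entry, e.g.
 *
 *	struct common_irq_params *params =
 *		&adev->dm.vblank_params[irq_src - DC_IRQ_SOURCE_VBLANK1];
 *
 * with irq_src in the range DC_IRQ_SOURCE_VBLANK1..DC_IRQ_SOURCE_VBLANK6.
 */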

enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/* We need to keep the EDID between detect
	 * and get_modes due to analog/digital/tvencoder */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST */
	uint8_t mst_status;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
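
/*
 * Example usage (illustrative sketch): a driver-created DRM connector is
 * converted back to its amdgpu_dm_connector wrapper with
 * to_amdgpu_dm_connector(), and MST probe progress is recorded with
 * amdgpu_dm_set_mst_status():
 *
 *	struct amdgpu_dm_connector *aconnector =
 *		to_amdgpu_dm_connector(connector);
 *
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);
 *
 * 'connector' is assumed to be a struct drm_connector registered by this
 * driver.
 */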

extern const struct amdgpu_ip_block_version dm_ip_block;

struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;
};


#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
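
/*
 * Illustrative sketch: the DM wrapper states above are recovered from their
 * DRM base structs with the to_dm_*() container_of() helpers, for example:
 *
 *	struct dm_connector_state *dm_state =
 *		to_dm_connector_state(connector->state);
 *
 *	if (dm_state->underscan_enable)
 *		example_apply_underscan(dm_state->underscan_hborder,
 *					dm_state->underscan_vborder);
 *
 * example_apply_underscan() is a hypothetical helper used only to show how
 * the underscan fields are consumed.
 */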

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
					     struct aux_payload *payload,
					     enum aux_return_code_type *operation_result);

int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
					   struct set_config_cmd_payload *payload,
					   enum set_config_status *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */