/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

/*
 * This file contains the definition of amdgpu_display_manager
 * and its API for use by the amdgpu driver.
 * This component provides all the display-related functionality
 * and is the only component that calls the DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from the KMS framework is located
 * in the amdgpu_dm_kms.h file.
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
 * DMUB Async to Sync Mechanism Status
 */
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
struct aux_payload;
enum aux_return_code_type;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
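
/*
 * Illustrative sketch only: what a handler matching the
 * dmub_notify_interrupt_callback_t signature above could look like. The
 * function name and body are hypothetical; the driver installs its real
 * handlers in the @dmub_callback table of &struct amdgpu_display_manager
 * declared below.
 *
 *	static void example_dmub_notify_cb(struct amdgpu_device *adev,
 *					   struct dmub_notification *notify)
 *	{
 *		// Inspect @notify and defer time consuming work to a
 *		// workqueue; without offloading, callbacks run from the
 *		// low priority outbox IRQ path.
 *	}
 */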

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
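
/*
 * Illustrative sketch only (not part of the driver API): one way a caller
 * could rescale a user brightness level into the input range advertised by
 * the caps above. The helper name and the linear mapping are assumptions
 * made for illustration.
 *
 *	static u32 example_scale_brightness(const struct amdgpu_dm_backlight_caps *caps,
 *					    u32 level, u32 level_max)
 *	{
 *		int min = caps->min_input_signal;
 *		int max = caps->max_input_signal;
 *
 *		if (!level_max || max <= min)
 *			return min;
 *		return min + div_u64((u64)level * (max - min), level_max);
 *	}
 */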

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: Protects the fields of the offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting a new link loss
	 * event while one is already being handled.
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
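
/*
 * Illustrative sketch only: how deferred HPD RX handling could be queued
 * onto a connector's offload queue using the two structures above. The
 * function names are hypothetical and error handling is kept minimal.
 *
 *	static void example_queue_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
 *						      union hpd_irq_data data)
 *	{
 *		struct hpd_rx_irq_offload_work *work;
 *
 *		work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *		if (!work)
 *			return;
 *
 *		// example_handle_hpd_rx_offload_work() is an assumed work
 *		// handler that processes work->data and then frees @work.
 *		INIT_WORK(&work->work, example_handle_hpd_rx_offload_work);
 *		work->data = data;
 *		work->offload_wq = offload_wq;
 *		queue_work(offload_wq->wq, &work->work);
 *	}
 */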

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *                     displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *                   driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *                          transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate whether the callback for a given notification type
	 * is offloaded to a worker thread.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @vblank_lock:
	 *
	 * Guards access to deferred vblank work state.
	 */
	spinlock_t vblank_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is an n*m table consisting of n IRQ sources and m handlers per
	 * IRQ source. Low priority IRQ handlers are deferred to a workqueue
	 * to be processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered. Indexed by the offset of the IRQ source from
	 * DC_IRQ_SOURCE_VBLANK1; see the note following this struct.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload hpd_rx_irq work.
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect that occurs while an AUX transaction is
	 * ongoing, observed on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;
};
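
/*
 * Illustrative note: the per-source IRQ parameter tables in
 * &struct amdgpu_display_manager are sized and indexed by the offset of an
 * interrupt source from the first source of its group, e.g. (sketch only,
 * with "adev" and "irq_src" standing for values in scope at the call site):
 *
 *	struct common_irq_params *params =
 *		&adev->dm.vblank_params[irq_src - DC_IRQ_SOURCE_VBLANK1];
 */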

enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST */
	uint8_t mst_status;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz.
	 */
	unsigned int max_refresh_rate_hz;
};


#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
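
/*
 * Illustrative sketch only: the container_of() helpers above recover the DM
 * wrappers from DRM core objects, and amdgpu_dm_set_mst_status() updates the
 * MST progress bits, e.g. in a connector callback ("connector" stands for a
 * struct drm_connector * in scope at the call site):
 *
 *	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 *	struct dm_connector_state *dm_state = to_dm_connector_state(connector->state);
 *
 *	// Record that MST topology probing has started on this connector.
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);
 */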

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
					struct dc_context *ctx, unsigned int link_index,
					void *payload, void *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */