/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

/*
 * This file contains the definition for amdgpu_display_manager
 * and its API for amdgpu driver's use.
 * This component provides all the display related functionality
 * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from KMS framework is located
 * in amdgpu_dm_kms.h file
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
 * DMUB Async to Sync Mechanism Status
 */
#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
struct aux_payload;
enum aux_return_code_type;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

/**
 * struct common_irq_params - Parameters passed to registered IRQ handlers
 * @adev: amdgpu base driver device the interrupt was raised on
 * @irq_src: the DC interrupt source this parameter block was registered for
 * @previous_timestamp: timestamp of the previously handled interrupt (atomic)
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

/* Callback invoked to handle a notification delivered by the DMUB. */
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate if callback is offload.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * quirk for hpd discon while aux is on-going.
	 * occurred on certain intel platform
	 */
	bool aux_hpd_discon_quirk;
};

/* Debugfs-selectable override for DSC clock enablement. */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

/* Preferred (forced) DSC parameters for a connector. */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

/* Bit flags recorded in amdgpu_dm_connector.mst_status. */
enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST (see enum mst_progress_status) */
	uint8_t mst_status;
};

/* Set or clear the given mst_progress_status flags in *status. */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

/* DM-private plane state, subclassing drm_plane_state. */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

/* DM-private CRTC state, subclassing drm_crtc_state. */
struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

/* Global DM atomic state, tracked via the &amdgpu_display_manager.atomic_obj
 * private object. */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

/* DM-private connector state, subclassing drm_connector_state. */
struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;
};


#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
					struct dc_context *ctx, unsigned int link_index,
					void *payload, void *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */