/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>

/*
 * This file contains the definition of amdgpu_display_manager
 * and the API it exposes to the rest of the amdgpu driver.
 * This component provides all the display related functionality
 * and is the only component that calls the DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from the KMS framework is located
 * in the amdgpu_dm_kms.h file.
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"

struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
enum set_config_status;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: CPU virtual address of the compression buffer
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: GPU address of the compression buffer
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
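
/*
 * Illustrative sketch only (not part of this header): a function with the
 * dmub_notify_interrupt_callback_t signature is stored in the dmub_callback
 * table of struct amdgpu_display_manager below, indexed by DMUB notification
 * type; the matching dmub_thread_offload entry selects whether the callback
 * runs from a worker thread. The handler name here is hypothetical:
 *
 *	adev->dm.dmub_callback[DMUB_NOTIFICATION_HPD] = example_hpd_callback;
 *	adev->dm.dmub_thread_offload[DMUB_NOTIFICATION_HPD] = true;
 */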

/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describes the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keeps the data struct with all the information about the
	 * display's HDR support.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
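
/*
 * Illustrative sketch only, not a function provided by this header: the
 * min/max fields above are typically used to rescale a 0-255 user-space
 * brightness value into the range the panel reports, assuming a linear
 * mapping:
 *
 *	static u32 example_scale_brightness(const struct amdgpu_dm_backlight_caps *caps,
 *					    u32 user_level)
 *	{
 *		u32 min = caps->min_input_signal;
 *		u32 max = caps->max_input_signal;
 *
 *		return min + DIV_ROUND_CLOSEST((max - min) * user_level, 255);
 *	}
 */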

/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};
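
/*
 * Illustrative sketch only: a dal_allocation is typically backed by a
 * buffer object created with amdgpu_bo_create_kernel() and then linked
 * into amdgpu_display_manager.da_list so it can be found and freed later.
 * Error handling is omitted and the size/domain below are assumptions:
 *
 *	struct dal_allocation *da = kzalloc(sizeof(*da), GFP_KERNEL);
 *
 *	amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				AMDGPU_GEM_DOMAIN_VRAM, &da->bo,
 *				&da->gpu_addr, &da->cpu_ptr);
 *	list_add(&da->list, &adev->dm.da_list);
 */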

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting another link loss
	 * event while one is already being handled.
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: HPD IRQ data captured at interrupt time and consumed while
	 * handling the offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
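
/*
 * Illustrative sketch only: hpd_rx_irq offload work is typically allocated
 * in the HPD RX interrupt path and pushed onto the per-link offload queue
 * roughly as follows (allocation-failure handling omitted, the handler
 * name is hypothetical):
 *
 *	struct hpd_rx_irq_offload_work *work;
 *
 *	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *	INIT_WORK(&work->work, example_hpd_rx_offload_work_handler);
 *	work->data = irq_data;
 *	work->offload_wq = offload_wq;
 *	queue_work(offload_wq->wq, &work->work);
 */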

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flags indicating whether the corresponding DMUB notification
	 * callback should be offloaded to a worker thread.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is an n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * SOC bounding box struct provided by the gpu_info firmware, or NULL
	 * if it is not available in the firmware.
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect that occurs while an AUX transaction
	 * is ongoing, observed on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;
};

enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/*
	 * We need to mind the EDID between detect
	 * and get modes due to analog/digital/tvencoder.
	 */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/*
	 * The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink.
	 */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST */
	uint8_t mst_status;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}
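
/*
 * Illustrative usage: MST progress is tracked by setting and clearing
 * enum mst_progress_status bits on amdgpu_dm_connector.mst_status, e.g.:
 *
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status,
 *				 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD,
 *				 false);
 */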

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
					struct aux_payload *payload, enum aux_return_code_type *operation_result);

int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
					struct set_config_cmd_payload *payload, enum set_config_status *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
	create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
					const struct drm_display_mode *drm_mode,
					const struct dm_connector_state *dm_state,
					const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);
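
/*
 * Illustrative sketch only: dm_atomic_get_state() is meant to be called
 * from atomic check/commit code to look up the DM private state object
 * and, through it, the dc_state context for the transaction:
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * dm_state->context then refers to the dc_state built for this commit.
 */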

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */