xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision c79fe9b436690209954f908a41b19e0bf575877a)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39 
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51 
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59 
60 #include "ivsrcid/ivsrcid_vislands30.h"
61 
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
80 
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
88 
89 #include "soc15_common.h"
90 #endif
91 
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
95 
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 
109 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 
112 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
117 
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
120 
121 /**
122  * DOC: overview
123  *
124  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
125  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
126  * requests into DC requests, and DC responses into DRM responses.
127  *
128  * The root control structure is &struct amdgpu_display_manager.
129  */
130 
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134 
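/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace via the DP subconnector property.
 */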
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
171 /*
172  * Initializes drm_device display-related structures, based on the information
173  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
174  * drm_encoder and drm_mode_config.
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 				  struct drm_atomic_state *state);
203 
204 static void handle_cursor_update(struct drm_plane *plane,
205 				 struct drm_plane_state *old_plane_state);
206 
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215 
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 				 struct drm_crtc_state *new_crtc_state);
219 /**
220  * dm_vblank_get_counter() - Get vblank counter for a CRTC
221  *
222  * @adev: [in] desired amdgpu device
223  * @crtc: [in] index of the CRTC to get the counter from
224  *
225  * Gets the current count of vertical blanks for the given CRTC by
226  * querying the dc_stream_state currently attached to it.
227  *
228  * Return:
229  * Counter for vertical blanks, or 0 if the CRTC index is out of range
230  * or no stream is attached.
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234 	if (crtc >= adev->mode_info.num_crtc)
235 		return 0;
236 	else {
237 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238 
239 		if (acrtc->dm_irq_params.stream == NULL) {
240 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 				  crtc);
242 			return 0;
243 		}
244 
245 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246 	}
247 }
248 
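/*
 * Read back the current scanout position from DC and pack it into the
 * register-style format (low/high 16-bit halves) the base driver expects.
 */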
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 				  u32 *vbl, u32 *position)
251 {
252 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
253 
254 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255 		return -EINVAL;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream == NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		/*
266 		 * TODO rework base driver to use values directly.
267 		 * for now parse it back into reg-format
268 		 */
269 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270 					 &v_blank_start,
271 					 &v_blank_end,
272 					 &h_position,
273 					 &v_position);
274 
275 		*position = v_position | (h_position << 16);
276 		*vbl = v_blank_start | (v_blank_end << 16);
277 	}
278 
279 	return 0;
280 }
281 
282 static bool dm_is_idle(void *handle)
283 {
284 	/* XXX todo */
285 	return true;
286 }
287 
288 static int dm_wait_for_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return 0;
292 }
293 
294 static bool dm_check_soft_reset(void *handle)
295 {
296 	return false;
297 }
298 
299 static int dm_soft_reset(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 		     int otg_inst)
308 {
309 	struct drm_device *dev = adev_to_drm(adev);
310 	struct drm_crtc *crtc;
311 	struct amdgpu_crtc *amdgpu_crtc;
312 
313 	if (otg_inst == -1) {
314 		WARN_ON(1);
315 		return adev->mode_info.crtcs[0];
316 	}
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
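/*
 * VRR is considered active when the freesync state is either "variable"
 * or "active fixed". The _irq variant reads the state cached in
 * dm_irq_params, which is the copy the interrupt handlers use.
 */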
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: interrupt parameters, carrying the device and IRQ source
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur while we are still in the initial bring-up stage */
373 	/* TODO: work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	if (!e)
396 		WARN_ON(1);
397 
398 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399 
400 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 	if (!vrr_active ||
402 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 				      &v_blank_end, &hpos, &vpos) ||
404 	    (vpos < v_blank_start)) {
405 		/* Update to correct count and vblank timestamp if racing with
406 		 * vblank irq. This also updates to the correct vblank timestamp
407 		 * even in VRR mode, as scanout is past the front-porch at this point.
408 		 */
409 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410 
411 		/* Wake up userspace by sending the pageflip event with proper
412 		 * count and timestamp of vblank of flip completion.
413 		 */
414 		if (e) {
415 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416 
417 			/* Event sent, so done with vblank for this flip */
418 			drm_crtc_vblank_put(&amdgpu_crtc->base);
419 		}
420 	} else if (e) {
421 		/* VRR active and inside front-porch: vblank count and
422 		 * timestamp for pageflip event will only be up to date after
423 		 * drm_crtc_handle_vblank() has been executed from late vblank
424 		 * irq handler after start of back-porch (vline 0). We queue the
425 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 		 * updated timestamp and count, once it runs after us.
427 		 *
428 		 * We need to open-code this instead of using the helper
429 		 * drm_crtc_arm_vblank_event(), as that helper would
430 		 * call drm_crtc_accurate_vblank_count(), which we must
431 		 * not call in VRR mode while we are in front-porch!
432 		 */
433 
434 		/* sequence will be replaced by real count during send-out. */
435 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 		e->pipe = amdgpu_crtc->crtc_id;
437 
438 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439 		e = NULL;
440 	}
441 
442 	/* Keep track of vblank of this flip for flip throttling. We use the
443 	 * cooked hw counter, as that one is incremented at the start of this vblank
444 	 * of pageflip completion, so last_flip_vblank is the forbidden count
445 	 * for queueing new pageflips if vsync + VRR is enabled.
446 	 */
447 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449 
450 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452 
453 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
455 			 vrr_active, (int) !e);
456 }
457 
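/*
 * Handle the VUPDATE interrupt: in VRR mode, core vblank handling (and
 * BTR processing on pre-AI ASICs) is deferred to this point, after the
 * end of the front-porch, so that vblank timestamps come out correct.
 */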
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460 	struct common_irq_params *irq_params = interrupt_params;
461 	struct amdgpu_device *adev = irq_params->adev;
462 	struct amdgpu_crtc *acrtc;
463 	unsigned long flags;
464 	int vrr_active;
465 
466 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
467 
468 	if (acrtc) {
469 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
470 
471 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
472 			      acrtc->crtc_id,
473 			      vrr_active);
474 
475 		/* Core vblank handling is done here after the end of front-porch in
476 		 * VRR mode, as vblank timestamping only gives valid results
477 		 * when done after the front-porch. This will also deliver
478 		 * page-flip completion events that have been queued to us
479 		 * if a pageflip happened inside front-porch.
480 		 */
481 		if (vrr_active) {
482 			drm_crtc_handle_vblank(&acrtc->base);
483 
484 			/* BTR processing for pre-DCE12 ASICs */
485 			if (acrtc->dm_irq_params.stream &&
486 			    adev->family < AMDGPU_FAMILY_AI) {
487 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
488 				mod_freesync_handle_v_update(
489 				    adev->dm.freesync_module,
490 				    acrtc->dm_irq_params.stream,
491 				    &acrtc->dm_irq_params.vrr_params);
492 
493 				dc_stream_adjust_vmin_vmax(
494 				    adev->dm.dc,
495 				    acrtc->dm_irq_params.stream,
496 				    &acrtc->dm_irq_params.vrr_params.adjust);
497 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
498 			}
499 		}
500 	}
501 }
502 
503 /**
504  * dm_crtc_high_irq() - Handles CRTC interrupt
505  * @interrupt_params: used for determining the CRTC instance
506  *
507  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
508  * event handler.
509  */
510 static void dm_crtc_high_irq(void *interrupt_params)
511 {
512 	struct common_irq_params *irq_params = interrupt_params;
513 	struct amdgpu_device *adev = irq_params->adev;
514 	struct amdgpu_crtc *acrtc;
515 	unsigned long flags;
516 	int vrr_active;
517 
518 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
519 	if (!acrtc)
520 		return;
521 
522 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
523 
524 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
525 		      vrr_active, acrtc->dm_irq_params.active_planes);
526 
527 	/*
528 	 * Core vblank handling at the start of front-porch is only possible
529 	 * in non-VRR mode, as only there does vblank timestamping give
530 	 * valid results while done in front-porch. Otherwise defer it
531 	 * to dm_vupdate_high_irq after the end of front-porch.
532 	 */
533 	if (!vrr_active)
534 		drm_crtc_handle_vblank(&acrtc->base);
535 
536 	/*
537 	 * The following must happen at the start of vblank, for CRC
538 	 * computation and below-the-range BTR support in VRR mode.
539 	 */
540 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
541 
542 	/* BTR updates need to happen before VUPDATE on Vega and above. */
543 	if (adev->family < AMDGPU_FAMILY_AI)
544 		return;
545 
546 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
547 
548 	if (acrtc->dm_irq_params.stream &&
549 	    acrtc->dm_irq_params.vrr_params.supported &&
550 	    acrtc->dm_irq_params.freesync_config.state ==
551 		    VRR_STATE_ACTIVE_VARIABLE) {
552 		mod_freesync_handle_v_update(adev->dm.freesync_module,
553 					     acrtc->dm_irq_params.stream,
554 					     &acrtc->dm_irq_params.vrr_params);
555 
556 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
557 					   &acrtc->dm_irq_params.vrr_params.adjust);
558 	}
559 
560 	/*
561 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
562 	 * In that case, pageflip completion interrupts won't fire and pageflip
563 	 * completion events won't get delivered. Prevent this by sending
564 	 * pending pageflip events from here if a flip is still pending.
565 	 *
566 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
567 	 * avoid race conditions between flip programming and completion,
568 	 * which could cause too early flip completion events.
569 	 */
570 	if (adev->family >= AMDGPU_FAMILY_RV &&
571 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
572 	    acrtc->dm_irq_params.active_planes == 0) {
573 		if (acrtc->event) {
574 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
575 			acrtc->event = NULL;
576 			drm_crtc_vblank_put(&acrtc->base);
577 		}
578 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
579 	}
580 
581 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
582 }
583 
584 static int dm_set_clockgating_state(void *handle,
585 		  enum amd_clockgating_state state)
586 {
587 	return 0;
588 }
589 
590 static int dm_set_powergating_state(void *handle,
591 		  enum amd_powergating_state state)
592 {
593 	return 0;
594 }
595 
596 /* Prototypes of private functions */
597 static int dm_early_init(void *handle);
598 
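/*
 * FBC is only set up for the eDP panel: the compressor buffer is sized
 * for the largest mode in the connector's mode list (4 bytes per pixel)
 * and allocated once in GTT.
 */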
599 /* Allocate memory for FBC compressed data */
600 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
601 {
602 	struct drm_device *dev = connector->dev;
603 	struct amdgpu_device *adev = drm_to_adev(dev);
604 	struct dm_compressor_info *compressor = &adev->dm.compressor;
605 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
606 	struct drm_display_mode *mode;
607 	unsigned long max_size = 0;
608 
609 	if (adev->dm.dc->fbc_compressor == NULL)
610 		return;
611 
612 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
613 		return;
614 
615 	if (compressor->bo_ptr)
616 		return;
617 
618 
619 	list_for_each_entry(mode, &connector->modes, head) {
620 		if (max_size < mode->htotal * mode->vtotal)
621 			max_size = mode->htotal * mode->vtotal;
622 	}
623 
624 	if (max_size) {
625 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
626 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
627 			    &compressor->gpu_addr, &compressor->cpu_addr);
628 
629 		if (r) {
630 			DRM_ERROR("DM: Failed to initialize FBC\n");
631 		} else {
632 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
633 			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
634 		}
635 
636 	}
637 
638 }
639 
640 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
641 					  int pipe, bool *enabled,
642 					  unsigned char *buf, int max_bytes)
643 {
644 	struct drm_device *dev = dev_get_drvdata(kdev);
645 	struct amdgpu_device *adev = drm_to_adev(dev);
646 	struct drm_connector *connector;
647 	struct drm_connector_list_iter conn_iter;
648 	struct amdgpu_dm_connector *aconnector;
649 	int ret = 0;
650 
651 	*enabled = false;
652 
653 	mutex_lock(&adev->dm.audio_lock);
654 
655 	drm_connector_list_iter_begin(dev, &conn_iter);
656 	drm_for_each_connector_iter(connector, &conn_iter) {
657 		aconnector = to_amdgpu_dm_connector(connector);
658 		if (aconnector->audio_inst != port)
659 			continue;
660 
661 		*enabled = true;
662 		ret = drm_eld_size(connector->eld);
663 		memcpy(buf, connector->eld, min(max_bytes, ret));
664 
665 		break;
666 	}
667 	drm_connector_list_iter_end(&conn_iter);
668 
669 	mutex_unlock(&adev->dm.audio_lock);
670 
671 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
672 
673 	return ret;
674 }
675 
676 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
677 	.get_eld = amdgpu_dm_audio_component_get_eld,
678 };
679 
680 static int amdgpu_dm_audio_component_bind(struct device *kdev,
681 				       struct device *hda_kdev, void *data)
682 {
683 	struct drm_device *dev = dev_get_drvdata(kdev);
684 	struct amdgpu_device *adev = drm_to_adev(dev);
685 	struct drm_audio_component *acomp = data;
686 
687 	acomp->ops = &amdgpu_dm_audio_component_ops;
688 	acomp->dev = kdev;
689 	adev->dm.audio_component = acomp;
690 
691 	return 0;
692 }
693 
694 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
695 					  struct device *hda_kdev, void *data)
696 {
697 	struct drm_device *dev = dev_get_drvdata(kdev);
698 	struct amdgpu_device *adev = drm_to_adev(dev);
699 	struct drm_audio_component *acomp = data;
700 
701 	acomp->ops = NULL;
702 	acomp->dev = NULL;
703 	adev->dm.audio_component = NULL;
704 }
705 
706 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
707 	.bind	= amdgpu_dm_audio_component_bind,
708 	.unbind	= amdgpu_dm_audio_component_unbind,
709 };
710 
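/*
 * DRM audio component glue: registering this component lets the HDA
 * audio driver query per-pin ELD data and connection state from DM.
 */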
711 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
712 {
713 	int i, ret;
714 
715 	if (!amdgpu_audio)
716 		return 0;
717 
718 	adev->mode_info.audio.enabled = true;
719 
720 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
721 
722 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
723 		adev->mode_info.audio.pin[i].channels = -1;
724 		adev->mode_info.audio.pin[i].rate = -1;
725 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
726 		adev->mode_info.audio.pin[i].status_bits = 0;
727 		adev->mode_info.audio.pin[i].category_code = 0;
728 		adev->mode_info.audio.pin[i].connected = false;
729 		adev->mode_info.audio.pin[i].id =
730 			adev->dm.dc->res_pool->audios[i]->inst;
731 		adev->mode_info.audio.pin[i].offset = 0;
732 	}
733 
734 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
735 	if (ret < 0)
736 		return ret;
737 
738 	adev->dm.audio_registered = true;
739 
740 	return 0;
741 }
742 
743 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
744 {
745 	if (!amdgpu_audio)
746 		return;
747 
748 	if (!adev->mode_info.audio.enabled)
749 		return;
750 
751 	if (adev->dm.audio_registered) {
752 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
753 		adev->dm.audio_registered = false;
754 	}
755 
756 	/* TODO: Disable audio? */
757 
758 	adev->mode_info.audio.enabled = false;
759 }
760 
761 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
762 {
763 	struct drm_audio_component *acomp = adev->dm.audio_component;
764 
765 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
766 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
767 
768 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
769 						 pin, -1);
770 	}
771 }
772 
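/*
 * Bring up the DMUB service: copy the firmware and VBIOS into the
 * framebuffer windows, zero the mailbox/tracebuffer/fw-state windows,
 * program the hardware parameters, then wait for the firmware to
 * auto-load before creating the DC-side DMUB server.
 */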
773 static int dm_dmub_hw_init(struct amdgpu_device *adev)
774 {
775 	const struct dmcub_firmware_header_v1_0 *hdr;
776 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
777 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
778 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
779 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
780 	struct abm *abm = adev->dm.dc->res_pool->abm;
781 	struct dmub_srv_hw_params hw_params;
782 	enum dmub_status status;
783 	const unsigned char *fw_inst_const, *fw_bss_data;
784 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
785 	bool has_hw_support;
786 
787 	if (!dmub_srv)
788 		/* DMUB isn't supported on the ASIC. */
789 		return 0;
790 
791 	if (!fb_info) {
792 		DRM_ERROR("No framebuffer info for DMUB service.\n");
793 		return -EINVAL;
794 	}
795 
796 	if (!dmub_fw) {
797 		/* Firmware required for DMUB support. */
798 		DRM_ERROR("No firmware provided for DMUB.\n");
799 		return -EINVAL;
800 	}
801 
802 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
803 	if (status != DMUB_STATUS_OK) {
804 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
805 		return -EINVAL;
806 	}
807 
808 	if (!has_hw_support) {
809 		DRM_INFO("DMUB unsupported on ASIC\n");
810 		return 0;
811 	}
812 
813 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
814 
815 	fw_inst_const = dmub_fw->data +
816 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
817 			PSP_HEADER_BYTES;
818 
819 	fw_bss_data = dmub_fw->data +
820 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
821 		      le32_to_cpu(hdr->inst_const_bytes);
822 
823 	/* Copy firmware and bios info into FB memory. */
824 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
825 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
826 
827 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
828 
829 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
830 	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
831 	 * fw_inst_const part to cw0; otherwise, the firmware backdoor
832 	 * load is done right here by dm_dmub_hw_init.
833 	 */
834 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
835 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
836 				fw_inst_const_size);
837 	}
838 
839 	if (fw_bss_data_size)
840 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
841 		       fw_bss_data, fw_bss_data_size);
842 
843 	/* Copy firmware bios info into FB memory. */
844 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
845 	       adev->bios_size);
846 
847 	/* Reset regions that need to be reset. */
848 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
849 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
850 
851 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
852 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
853 
854 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
855 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
856 
857 	/* Initialize hardware. */
858 	memset(&hw_params, 0, sizeof(hw_params));
859 	hw_params.fb_base = adev->gmc.fb_start;
860 	hw_params.fb_offset = adev->gmc.aper_base;
861 
862 	/* backdoor load firmware and trigger dmub running */
863 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
864 		hw_params.load_inst_const = true;
865 
866 	if (dmcu)
867 		hw_params.psp_version = dmcu->psp_version;
868 
869 	for (i = 0; i < fb_info->num_fb; ++i)
870 		hw_params.fb[i] = &fb_info->fb[i];
871 
872 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
873 	if (status != DMUB_STATUS_OK) {
874 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
875 		return -EINVAL;
876 	}
877 
878 	/* Wait for firmware load to finish. */
879 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
880 	if (status != DMUB_STATUS_OK)
881 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
882 
883 	/* Init DMCU and ABM if available. */
884 	if (dmcu && abm) {
885 		dmcu->funcs->dmcu_init(dmcu);
886 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
887 	}
888 
889 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
890 	if (!adev->dm.dc->ctx->dmub_srv) {
891 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
892 		return -ENOMEM;
893 	}
894 
895 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
896 		 adev->dm.dmcub_fw_version);
897 
898 	return 0;
899 }
900 
901 #if defined(CONFIG_DRM_AMD_DC_DCN)
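/*
 * Translate the GMC view of the address space (frame buffer, AGP
 * aperture and GART page tables) into the dc_phy_addr_space_config
 * that dc_setup_system_context expects; called for APUs from
 * amdgpu_dm_init.
 */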
902 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
903 {
904 	uint64_t pt_base;
905 	uint32_t logical_addr_low;
906 	uint32_t logical_addr_high;
907 	uint32_t agp_base, agp_bot, agp_top;
908 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
909 
910 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
911 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
912 
913 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
914 		/*
915 		 * Raven2 has a HW issue that it is unable to use the vram which
916 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
917 		 * workaround that increase system aperture high address (add 1)
918 		 * to get rid of the VM fault and hardware hang.
919 		 */
920 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
921 	else
922 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
923 
924 	agp_base = 0;
925 	agp_bot = adev->gmc.agp_start >> 24;
926 	agp_top = adev->gmc.agp_end >> 24;
927 
928 
929 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
930 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
931 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
932 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
933 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
934 	page_table_base.low_part = lower_32_bits(pt_base);
935 
936 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
937 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
938 
939 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
940 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
941 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
942 
943 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
944 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
945 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
946 
947 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
948 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
949 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
950 
951 	pa_config->is_hvm_enabled = 0;
952 
953 }
954 #endif
955 #if defined(CONFIG_DRM_AMD_DC_DCN)
956 static void event_mall_stutter(struct work_struct *work)
957 {
958 
959 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
960 	struct amdgpu_display_manager *dm = vblank_work->dm;
961 
962 	mutex_lock(&dm->dc_lock);
963 
964 	if (vblank_work->enable)
965 		dm->active_vblank_irq_count++;
966 	else
967 		dm->active_vblank_irq_count--;
968 
969 
970 	dc_allow_idle_optimizations(dm->dc,
971 				    dm->active_vblank_irq_count == 0);
972 
973 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
974 
975 
976 	mutex_unlock(&dm->dc_lock);
977 }
978 
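/*
 * Allocate one mall_work item per possible link. The work handler,
 * event_mall_stutter, tracks how many CRTCs have their vblank interrupt
 * enabled and only lets DC apply idle (MALL) optimizations when that
 * count drops to zero.
 */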
979 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
980 {
981 
982 	int max_caps = dc->caps.max_links;
983 	struct vblank_workqueue *vblank_work;
984 	int i = 0;
985 
986 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
987 	if (ZERO_OR_NULL_PTR(vblank_work)) {
988 		kfree(vblank_work);
989 		return NULL;
990 	}
991 
992 	for (i = 0; i < max_caps; i++)
993 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
994 
995 	return vblank_work;
996 }
997 #endif
998 static int amdgpu_dm_init(struct amdgpu_device *adev)
999 {
1000 	struct dc_init_data init_data;
1001 #ifdef CONFIG_DRM_AMD_DC_HDCP
1002 	struct dc_callback_init init_params;
1003 #endif
1004 	int r;
1005 
1006 	adev->dm.ddev = adev_to_drm(adev);
1007 	adev->dm.adev = adev;
1008 
1009 	/* Zero all the fields */
1010 	memset(&init_data, 0, sizeof(init_data));
1011 #ifdef CONFIG_DRM_AMD_DC_HDCP
1012 	memset(&init_params, 0, sizeof(init_params));
1013 #endif
1014 
1015 	mutex_init(&adev->dm.dc_lock);
1016 	mutex_init(&adev->dm.audio_lock);
1017 #if defined(CONFIG_DRM_AMD_DC_DCN)
1018 	spin_lock_init(&adev->dm.vblank_lock);
1019 #endif
1020 
1021 	if (amdgpu_dm_irq_init(adev)) {
1022 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1023 		goto error;
1024 	}
1025 
1026 	init_data.asic_id.chip_family = adev->family;
1027 
1028 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1029 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1030 
1031 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1032 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1033 	init_data.asic_id.atombios_base_address =
1034 		adev->mode_info.atom_context->bios;
1035 
1036 	init_data.driver = adev;
1037 
1038 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1039 
1040 	if (!adev->dm.cgs_device) {
1041 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1042 		goto error;
1043 	}
1044 
1045 	init_data.cgs_device = adev->dm.cgs_device;
1046 
1047 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1048 
1049 	switch (adev->asic_type) {
1050 	case CHIP_CARRIZO:
1051 	case CHIP_STONEY:
1052 	case CHIP_RAVEN:
1053 	case CHIP_RENOIR:
1054 		init_data.flags.gpu_vm_support = true;
1055 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1056 			init_data.flags.disable_dmcu = true;
1057 		break;
1058 #if defined(CONFIG_DRM_AMD_DC_DCN)
1059 	case CHIP_VANGOGH:
1060 		init_data.flags.gpu_vm_support = true;
1061 		break;
1062 #endif
1063 	default:
1064 		break;
1065 	}
1066 
1067 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1068 		init_data.flags.fbc_support = true;
1069 
1070 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1071 		init_data.flags.multi_mon_pp_mclk_switch = true;
1072 
1073 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1074 		init_data.flags.disable_fractional_pwm = true;
1075 
1076 	init_data.flags.power_down_display_on_boot = true;
1077 
1078 	/* Display Core create. */
1079 	adev->dm.dc = dc_create(&init_data);
1080 
1081 	if (adev->dm.dc) {
1082 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1083 	} else {
1084 		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1085 		goto error;
1086 	}
1087 
1088 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1089 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1090 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1091 	}
1092 
1093 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1094 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1095 
1096 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1097 		adev->dm.dc->debug.disable_stutter = true;
1098 
1099 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1100 		adev->dm.dc->debug.disable_dsc = true;
1101 
1102 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1103 		adev->dm.dc->debug.disable_clock_gate = true;
1104 
1105 	r = dm_dmub_hw_init(adev);
1106 	if (r) {
1107 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1108 		goto error;
1109 	}
1110 
1111 	dc_hardware_init(adev->dm.dc);
1112 
1113 #if defined(CONFIG_DRM_AMD_DC_DCN)
1114 	if (adev->apu_flags) {
1115 		struct dc_phy_addr_space_config pa_config;
1116 
1117 		mmhub_read_system_context(adev, &pa_config);
1118 
1119 		/* Call the DC init_memory func */
1120 		dc_setup_system_context(adev->dm.dc, &pa_config);
1121 	}
1122 #endif
1123 
1124 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1125 	if (!adev->dm.freesync_module) {
1126 		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1127 	} else {
1128 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1129 				 adev->dm.freesync_module);
1130 	}
1131 
1132 	amdgpu_dm_init_color_mod();
1133 
1134 #if defined(CONFIG_DRM_AMD_DC_DCN)
1135 	if (adev->dm.dc->caps.max_links > 0) {
1136 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1137 
1138 		if (!adev->dm.vblank_workqueue)
1139 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1140 		else
1141 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1142 	}
1143 #endif
1144 
1145 #ifdef CONFIG_DRM_AMD_DC_HDCP
1146 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1147 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1148 
1149 		if (!adev->dm.hdcp_workqueue)
1150 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1151 		else
1152 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1153 
1154 		dc_init_callbacks(adev->dm.dc, &init_params);
1155 	}
1156 #endif
1157 	if (amdgpu_dm_initialize_drm_device(adev)) {
1158 		DRM_ERROR(
1159 		"amdgpu: failed to initialize sw for display support.\n");
1160 		goto error;
1161 	}
1162 
1163 	/* create fake encoders for MST */
1164 	dm_dp_create_fake_mst_encoders(adev);
1165 
1166 	/* TODO: Add_display_info? */
1167 
1168 	/* TODO use dynamic cursor width */
1169 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1170 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1171 
1172 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1173 		DRM_ERROR(
1174 		"amdgpu: failed to initialize vblank support.\n");
1175 		goto error;
1176 	}
1177 
1178 
1179 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1180 
1181 	return 0;
1182 error:
1183 	amdgpu_dm_fini(adev);
1184 
1185 	return -EINVAL;
1186 }
1187 
1188 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1189 {
1190 	int i;
1191 
1192 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1193 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1194 	}
1195 
1196 	amdgpu_dm_audio_fini(adev);
1197 
1198 	amdgpu_dm_destroy_drm_device(&adev->dm);
1199 
1200 #ifdef CONFIG_DRM_AMD_DC_HDCP
1201 	if (adev->dm.hdcp_workqueue) {
1202 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1203 		adev->dm.hdcp_workqueue = NULL;
1204 	}
1205 
1206 	if (adev->dm.dc)
1207 		dc_deinit_callbacks(adev->dm.dc);
1208 #endif
1209 	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1210 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1211 		adev->dm.dc->ctx->dmub_srv = NULL;
1212 	}
1213 
1214 	if (adev->dm.dmub_bo)
1215 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1216 				      &adev->dm.dmub_bo_gpu_addr,
1217 				      &adev->dm.dmub_bo_cpu_addr);
1218 
1219 	/* DC Destroy TODO: Replace destroy DAL */
1220 	if (adev->dm.dc)
1221 		dc_destroy(&adev->dm.dc);
1222 	/*
1223 	 * TODO: pageflip, vblank interrupt
1224 	 *
1225 	 * amdgpu_dm_irq_fini(adev);
1226 	 */
1227 
1228 	if (adev->dm.cgs_device) {
1229 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1230 		adev->dm.cgs_device = NULL;
1231 	}
1232 	if (adev->dm.freesync_module) {
1233 		mod_freesync_destroy(adev->dm.freesync_module);
1234 		adev->dm.freesync_module = NULL;
1235 	}
1236 
1237 	mutex_destroy(&adev->dm.audio_lock);
1238 	mutex_destroy(&adev->dm.dc_lock);
1239 
1240 	return;
1241 }
1242 
1243 static int load_dmcu_fw(struct amdgpu_device *adev)
1244 {
1245 	const char *fw_name_dmcu = NULL;
1246 	int r;
1247 	const struct dmcu_firmware_header_v1_0 *hdr;
1248 
1249 	switch (adev->asic_type) {
1250 #if defined(CONFIG_DRM_AMD_DC_SI)
1251 	case CHIP_TAHITI:
1252 	case CHIP_PITCAIRN:
1253 	case CHIP_VERDE:
1254 	case CHIP_OLAND:
1255 #endif
1256 	case CHIP_BONAIRE:
1257 	case CHIP_HAWAII:
1258 	case CHIP_KAVERI:
1259 	case CHIP_KABINI:
1260 	case CHIP_MULLINS:
1261 	case CHIP_TONGA:
1262 	case CHIP_FIJI:
1263 	case CHIP_CARRIZO:
1264 	case CHIP_STONEY:
1265 	case CHIP_POLARIS11:
1266 	case CHIP_POLARIS10:
1267 	case CHIP_POLARIS12:
1268 	case CHIP_VEGAM:
1269 	case CHIP_VEGA10:
1270 	case CHIP_VEGA12:
1271 	case CHIP_VEGA20:
1272 	case CHIP_NAVI10:
1273 	case CHIP_NAVI14:
1274 	case CHIP_RENOIR:
1275 	case CHIP_SIENNA_CICHLID:
1276 	case CHIP_NAVY_FLOUNDER:
1277 	case CHIP_DIMGREY_CAVEFISH:
1278 	case CHIP_VANGOGH:
1279 		return 0;
1280 	case CHIP_NAVI12:
1281 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1282 		break;
1283 	case CHIP_RAVEN:
1284 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1285 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1286 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1287 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1288 		else
1289 			return 0;
1290 		break;
1291 	default:
1292 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1293 		return -EINVAL;
1294 	}
1295 
1296 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1297 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1298 		return 0;
1299 	}
1300 
1301 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1302 	if (r == -ENOENT) {
1303 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1304 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1305 		adev->dm.fw_dmcu = NULL;
1306 		return 0;
1307 	}
1308 	if (r) {
1309 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1310 			fw_name_dmcu);
1311 		return r;
1312 	}
1313 
1314 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1315 	if (r) {
1316 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1317 			fw_name_dmcu);
1318 		release_firmware(adev->dm.fw_dmcu);
1319 		adev->dm.fw_dmcu = NULL;
1320 		return r;
1321 	}
1322 
1323 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1324 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1325 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1326 	adev->firmware.fw_size +=
1327 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1328 
1329 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1330 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1331 	adev->firmware.fw_size +=
1332 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1333 
1334 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1335 
1336 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1337 
1338 	return 0;
1339 }
1340 
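/*
 * Thin register-access shims handed to the DMUB service; they route all
 * DMUB register reads and writes through DC's dm_read_reg/dm_write_reg.
 */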
1341 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1342 {
1343 	struct amdgpu_device *adev = ctx;
1344 
1345 	return dm_read_reg(adev->dm.dc->ctx, address);
1346 }
1347 
1348 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1349 				     uint32_t value)
1350 {
1351 	struct amdgpu_device *adev = ctx;
1352 
1353 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1354 }
1355 
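/*
 * Software-side DMUB setup: pick the per-ASIC firmware, create the DMUB
 * service with the register accessors above, size all firmware regions,
 * back them with a single VRAM BO and record the resulting fb_info for
 * the later hardware init.
 */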
1356 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1357 {
1358 	struct dmub_srv_create_params create_params;
1359 	struct dmub_srv_region_params region_params;
1360 	struct dmub_srv_region_info region_info;
1361 	struct dmub_srv_fb_params fb_params;
1362 	struct dmub_srv_fb_info *fb_info;
1363 	struct dmub_srv *dmub_srv;
1364 	const struct dmcub_firmware_header_v1_0 *hdr;
1365 	const char *fw_name_dmub;
1366 	enum dmub_asic dmub_asic;
1367 	enum dmub_status status;
1368 	int r;
1369 
1370 	switch (adev->asic_type) {
1371 	case CHIP_RENOIR:
1372 		dmub_asic = DMUB_ASIC_DCN21;
1373 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1374 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1375 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1376 		break;
1377 	case CHIP_SIENNA_CICHLID:
1378 		dmub_asic = DMUB_ASIC_DCN30;
1379 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1380 		break;
1381 	case CHIP_NAVY_FLOUNDER:
1382 		dmub_asic = DMUB_ASIC_DCN30;
1383 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1384 		break;
1385 	case CHIP_VANGOGH:
1386 		dmub_asic = DMUB_ASIC_DCN301;
1387 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1388 		break;
1389 	case CHIP_DIMGREY_CAVEFISH:
1390 		dmub_asic = DMUB_ASIC_DCN302;
1391 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1392 		break;
1393 
1394 	default:
1395 		/* ASIC doesn't support DMUB. */
1396 		return 0;
1397 	}
1398 
1399 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1400 	if (r) {
1401 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1402 		return 0;
1403 	}
1404 
1405 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1406 	if (r) {
1407 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1408 		return 0;
1409 	}
1410 
1411 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1412 
1413 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1414 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1415 			AMDGPU_UCODE_ID_DMCUB;
1416 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1417 			adev->dm.dmub_fw;
1418 		adev->firmware.fw_size +=
1419 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1420 
1421 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1422 			 adev->dm.dmcub_fw_version);
1423 	}
1424 
1425 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1426 
1427 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1428 	dmub_srv = adev->dm.dmub_srv;
1429 
1430 	if (!dmub_srv) {
1431 		DRM_ERROR("Failed to allocate DMUB service!\n");
1432 		return -ENOMEM;
1433 	}
1434 
1435 	memset(&create_params, 0, sizeof(create_params));
1436 	create_params.user_ctx = adev;
1437 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1438 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1439 	create_params.asic = dmub_asic;
1440 
1441 	/* Create the DMUB service. */
1442 	status = dmub_srv_create(dmub_srv, &create_params);
1443 	if (status != DMUB_STATUS_OK) {
1444 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1445 		return -EINVAL;
1446 	}
1447 
1448 	/* Calculate the size of all the regions for the DMUB service. */
1449 	memset(&region_params, 0, sizeof(region_params));
1450 
1451 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1452 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1453 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1454 	region_params.vbios_size = adev->bios_size;
1455 	region_params.fw_bss_data = region_params.bss_data_size ?
1456 		adev->dm.dmub_fw->data +
1457 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1458 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1459 	region_params.fw_inst_const =
1460 		adev->dm.dmub_fw->data +
1461 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1462 		PSP_HEADER_BYTES;
1463 
1464 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1465 					   &region_info);
1466 
1467 	if (status != DMUB_STATUS_OK) {
1468 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1469 		return -EINVAL;
1470 	}
1471 
1472 	/*
1473 	 * Allocate a framebuffer based on the total size of all the regions.
1474 	 * TODO: Move this into GART.
1475 	 */
1476 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1477 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1478 				    &adev->dm.dmub_bo_gpu_addr,
1479 				    &adev->dm.dmub_bo_cpu_addr);
1480 	if (r)
1481 		return r;
1482 
1483 	/* Rebase the regions on the framebuffer address. */
1484 	memset(&fb_params, 0, sizeof(fb_params));
1485 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1486 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1487 	fb_params.region_info = &region_info;
1488 
1489 	adev->dm.dmub_fb_info =
1490 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1491 	fb_info = adev->dm.dmub_fb_info;
1492 
1493 	if (!fb_info) {
1494 		DRM_ERROR(
1495 			"Failed to allocate framebuffer info for DMUB service!\n");
1496 		return -ENOMEM;
1497 	}
1498 
1499 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1500 	if (status != DMUB_STATUS_OK) {
1501 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1502 		return -EINVAL;
1503 	}
1504 
1505 	return 0;
1506 }
1507 
1508 static int dm_sw_init(void *handle)
1509 {
1510 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1511 	int r;
1512 
1513 	r = dm_dmub_sw_init(adev);
1514 	if (r)
1515 		return r;
1516 
1517 	return load_dmcu_fw(adev);
1518 }
1519 
1520 static int dm_sw_fini(void *handle)
1521 {
1522 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1523 
1524 	kfree(adev->dm.dmub_fb_info);
1525 	adev->dm.dmub_fb_info = NULL;
1526 
1527 	if (adev->dm.dmub_srv) {
1528 		dmub_srv_destroy(adev->dm.dmub_srv);
1529 		adev->dm.dmub_srv = NULL;
1530 	}
1531 
1532 	release_firmware(adev->dm.dmub_fw);
1533 	adev->dm.dmub_fw = NULL;
1534 
1535 	release_firmware(adev->dm.fw_dmcu);
1536 	adev->dm.fw_dmcu = NULL;
1537 
1538 	return 0;
1539 }
1540 
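/*
 * Walk all connectors and start topology management on every MST branch
 * device that already has an AUX channel; a failure demotes the link
 * back to a single (SST) connection.
 */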
1541 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1542 {
1543 	struct amdgpu_dm_connector *aconnector;
1544 	struct drm_connector *connector;
1545 	struct drm_connector_list_iter iter;
1546 	int ret = 0;
1547 
1548 	drm_connector_list_iter_begin(dev, &iter);
1549 	drm_for_each_connector_iter(connector, &iter) {
1550 		aconnector = to_amdgpu_dm_connector(connector);
1551 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1552 		    aconnector->mst_mgr.aux) {
1553 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1554 					 aconnector,
1555 					 aconnector->base.base.id);
1556 
1557 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1558 			if (ret < 0) {
1559 				DRM_ERROR("DM_MST: Failed to start MST\n");
1560 				aconnector->dc_link->type =
1561 					dc_connection_single;
1562 				break;
1563 			}
1564 		}
1565 	}
1566 	drm_connector_list_iter_end(&iter);
1567 
1568 	return ret;
1569 }
1570 
1571 static int dm_late_init(void *handle)
1572 {
1573 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1574 
1575 	struct dmcu_iram_parameters params;
1576 	unsigned int linear_lut[16];
1577 	int i;
1578 	struct dmcu *dmcu = NULL;
1579 	bool ret = true;
1580 
1581 	dmcu = adev->dm.dc->res_pool->dmcu;
1582 
1583 	for (i = 0; i < 16; i++)
1584 		linear_lut[i] = 0xFFFF * i / 15;
1585 
1586 	params.set = 0;
1587 	params.backlight_ramping_start = 0xCCCC;
1588 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1589 	params.backlight_lut_array_size = 16;
1590 	params.backlight_lut_array = linear_lut;
1591 
1592 	/* Min backlight level after ABM reduction; don't allow below 1%:
1593 	 * 0xFFFF x 0.01 = 0x28F
1594 	 */
1595 	params.min_abm_backlight = 0x28F;
1596 
1597 	/* In the case where ABM is implemented on dmcub,
1598 	 * the dmcu object will be NULL.
1599 	 * ABM 2.4 and up are implemented on dmcub.
1600 	 */
1601 	if (dmcu)
1602 		ret = dmcu_load_iram(dmcu, params);
1603 	else if (adev->dm.dc->ctx->dmub_srv)
1604 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1605 
1606 	if (!ret)
1607 		return -EINVAL;
1608 
1609 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1610 }
1611 
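/*
 * Suspend or resume the MST topology managers around S3. If a manager
 * fails to resume, MST is torn down on that connector and a hotplug
 * event is sent so userspace can re-probe the topology.
 */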
1612 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1613 {
1614 	struct amdgpu_dm_connector *aconnector;
1615 	struct drm_connector *connector;
1616 	struct drm_connector_list_iter iter;
1617 	struct drm_dp_mst_topology_mgr *mgr;
1618 	int ret;
1619 	bool need_hotplug = false;
1620 
1621 	drm_connector_list_iter_begin(dev, &iter);
1622 	drm_for_each_connector_iter(connector, &iter) {
1623 		aconnector = to_amdgpu_dm_connector(connector);
1624 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1625 		    aconnector->mst_port)
1626 			continue;
1627 
1628 		mgr = &aconnector->mst_mgr;
1629 
1630 		if (suspend) {
1631 			drm_dp_mst_topology_mgr_suspend(mgr);
1632 		} else {
1633 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1634 			if (ret < 0) {
1635 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1636 				need_hotplug = true;
1637 			}
1638 		}
1639 	}
1640 	drm_connector_list_iter_end(&iter);
1641 
1642 	if (need_hotplug)
1643 		drm_kms_helper_hotplug_event(dev);
1644 }
1645 
1646 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1647 {
1648 	struct smu_context *smu = &adev->smu;
1649 	int ret = 0;
1650 
1651 	if (!is_support_sw_smu(adev))
1652 		return 0;
1653 
1654 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1655 	 * depends on the Windows driver dc implementation.
1656 	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1657 	 * should be passed to smu during boot up and resume from S3.
1658 	 * Boot up: dc calculates dcn watermark clock settings within dc_create,
1659 	 * dcn20_resource_construct,
1660 	 * then calls pplib functions below to pass the settings to smu:
1661 	 * smu_set_watermarks_for_clock_ranges
1662 	 * smu_set_watermarks_table
1663 	 * navi10_set_watermarks_table
1664 	 * smu_write_watermarks_table
1665 	 *
1666 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1667 	 * dc has implemented a different flow for the Windows driver:
1668 	 * dc_hardware_init / dc_set_power_state
1669 	 * dcn10_init_hw
1670 	 * notify_wm_ranges
1671 	 * set_wm_ranges
1672 	 * -- Linux
1673 	 * smu_set_watermarks_for_clock_ranges
1674 	 * renoir_set_watermarks_table
1675 	 * smu_write_watermarks_table
1676 	 *
1677 	 * For Linux,
1678 	 * dc_hardware_init -> amdgpu_dm_init
1679 	 * dc_set_power_state --> dm_resume
1680 	 *
1681 	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1682 	 *
1683 	 */
1684 	switch (adev->asic_type) {
1685 	case CHIP_NAVI10:
1686 	case CHIP_NAVI14:
1687 	case CHIP_NAVI12:
1688 		break;
1689 	default:
1690 		return 0;
1691 	}
1692 
1693 	ret = smu_write_watermarks_table(smu);
1694 	if (ret) {
1695 		DRM_ERROR("Failed to update WMTABLE!\n");
1696 		return ret;
1697 	}
1698 
1699 	return 0;
1700 }
1701 
1702 /**
1703  * dm_hw_init() - Initialize DC device
1704  * @handle: The base driver device containing the amdgpu_dm device.
1705  *
1706  * Initialize the &struct amdgpu_display_manager device. This involves calling
1707  * the initializers of each DM component, then populating the struct with them.
1708  *
1709  * Although the function implies hardware initialization, both hardware and
1710  * software are initialized here. Splitting them out to their relevant init
1711  * hooks is a future TODO item.
1712  *
1713  * Some notable things that are initialized here:
1714  *
1715  * - Display Core, both software and hardware
1716  * - DC modules that we need (freesync and color management)
1717  * - DRM software states
1718  * - Interrupt sources and handlers
1719  * - Vblank support
1720  * - Debug FS entries, if enabled
1721  */
1722 static int dm_hw_init(void *handle)
1723 {
1724 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1725 	/* Create DAL display manager */
1726 	amdgpu_dm_init(adev);
1727 	amdgpu_dm_hpd_init(adev);
1728 
1729 	return 0;
1730 }
1731 
1732 /**
1733  * dm_hw_fini() - Teardown DC device
1734  * @handle: The base driver device containing the amdgpu_dm device.
1735  *
1736  * Teardown components within &struct amdgpu_display_manager that require
1737  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1738  * were loaded. Also flush IRQ workqueues and disable them.
1739  */
1740 static int dm_hw_fini(void *handle)
1741 {
1742 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1743 
1744 	amdgpu_dm_hpd_fini(adev);
1745 
1746 	amdgpu_dm_irq_fini(adev);
1747 	amdgpu_dm_fini(adev);
1748 	return 0;
1749 }
1750 
1751 
1752 static int dm_enable_vblank(struct drm_crtc *crtc);
1753 static void dm_disable_vblank(struct drm_crtc *crtc);
1754 
1755 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1756 				 struct dc_state *state, bool enable)
1757 {
1758 	enum dc_irq_source irq_source;
1759 	struct amdgpu_crtc *acrtc;
1760 	int rc = -EBUSY;
1761 	int i = 0;
1762 
1763 	for (i = 0; i < state->stream_count; i++) {
1764 		acrtc = get_crtc_by_otg_inst(
1765 				adev, state->stream_status[i].primary_otg_inst);
1766 
1767 		if (acrtc && state->stream_status[i].plane_count != 0) {
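			/* The pflip IRQ source for this CRTC follows from its
			 * OTG instance: sources are laid out per OTG starting
			 * at IRQ_TYPE_PFLIP, as the computation below shows.
			 */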
1768 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1769 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1770 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1771 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1772 			if (rc)
1773 				DRM_WARN("Failed to %s pflip interrupts\n",
1774 					 enable ? "enable" : "disable");
1775 
1776 			if (enable) {
1777 				rc = dm_enable_vblank(&acrtc->base);
1778 				if (rc)
1779 					DRM_WARN("Failed to enable vblank interrupts\n");
1780 			} else {
1781 				dm_disable_vblank(&acrtc->base);
1782 			}
1783 
1784 		}
1785 	}
1786 
1787 }
1788 
1789 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1790 {
1791 	struct dc_state *context = NULL;
1792 	enum dc_status res = DC_ERROR_UNEXPECTED;
1793 	int i;
1794 	struct dc_stream_state *del_streams[MAX_PIPES];
1795 	int del_streams_count = 0;
1796 
1797 	memset(del_streams, 0, sizeof(del_streams));
1798 
1799 	context = dc_create_state(dc);
1800 	if (context == NULL)
1801 		goto context_alloc_fail;
1802 
1803 	dc_resource_state_copy_construct_current(dc, context);
1804 
1805 	/* First remove from context all streams */
1806 	for (i = 0; i < context->stream_count; i++) {
1807 		struct dc_stream_state *stream = context->streams[i];
1808 
1809 		del_streams[del_streams_count++] = stream;
1810 	}
1811 
1812 	/* Remove all planes for removed streams and then remove the streams */
1813 	for (i = 0; i < del_streams_count; i++) {
1814 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1815 			res = DC_FAIL_DETACH_SURFACES;
1816 			goto fail;
1817 		}
1818 
1819 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1820 		if (res != DC_OK)
1821 			goto fail;
1822 	}
1823 
1824 
1825 	res = dc_validate_global_state(dc, context, false);
1826 
1827 	if (res != DC_OK) {
1828 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1829 		goto fail;
1830 	}
1831 
1832 	res = dc_commit_state(dc, context);
1833 
1834 fail:
1835 	dc_release_state(context);
1836 
1837 context_alloc_fail:
1838 	return res;
1839 }
1840 
1841 static int dm_suspend(void *handle)
1842 {
1843 	struct amdgpu_device *adev = handle;
1844 	struct amdgpu_display_manager *dm = &adev->dm;
1845 	int ret = 0;
1846 
1847 	if (amdgpu_in_reset(adev)) {
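		/* Hold dc_lock across the GPU reset; the matching
		 * mutex_unlock() is in the amdgpu_in_reset() path of
		 * dm_resume().
		 */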
1848 		mutex_lock(&dm->dc_lock);
1849 
1850 #if defined(CONFIG_DRM_AMD_DC_DCN)
1851 		dc_allow_idle_optimizations(adev->dm.dc, false);
1852 #endif
1853 
1854 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1855 
1856 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1857 
1858 		amdgpu_dm_commit_zero_streams(dm->dc);
1859 
1860 		amdgpu_dm_irq_suspend(adev);
1861 
1862 		return ret;
1863 	}
1864 
1865 	WARN_ON(adev->dm.cached_state);
1866 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1867 
1868 	s3_handle_mst(adev_to_drm(adev), true);
1869 
1870 	amdgpu_dm_irq_suspend(adev);
1871 
1872 
1873 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1874 
1875 	return 0;
1876 }
1877 
1878 static struct amdgpu_dm_connector *
1879 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1880 					     struct drm_crtc *crtc)
1881 {
1882 	uint32_t i;
1883 	struct drm_connector_state *new_con_state;
1884 	struct drm_connector *connector;
1885 	struct drm_crtc *crtc_from_state;
1886 
1887 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1888 		crtc_from_state = new_con_state->crtc;
1889 
1890 		if (crtc_from_state == crtc)
1891 			return to_amdgpu_dm_connector(connector);
1892 	}
1893 
1894 	return NULL;
1895 }
1896 
1897 static void emulated_link_detect(struct dc_link *link)
1898 {
1899 	struct dc_sink_init_data sink_init_data = { 0 };
1900 	struct display_sink_capability sink_caps = { 0 };
1901 	enum dc_edid_status edid_status;
1902 	struct dc_context *dc_ctx = link->ctx;
1903 	struct dc_sink *sink = NULL;
1904 	struct dc_sink *prev_sink = NULL;
1905 
1906 	link->type = dc_connection_none;
1907 	prev_sink = link->local_sink;
1908 
1909 	if (prev_sink)
1910 		dc_sink_release(prev_sink);
1911 
1912 	switch (link->connector_signal) {
1913 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1914 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1915 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1916 		break;
1917 	}
1918 
1919 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1920 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1921 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1922 		break;
1923 	}
1924 
1925 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1926 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1927 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1928 		break;
1929 	}
1930 
1931 	case SIGNAL_TYPE_LVDS: {
1932 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1933 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1934 		break;
1935 	}
1936 
1937 	case SIGNAL_TYPE_EDP: {
1938 		sink_caps.transaction_type =
1939 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1940 		sink_caps.signal = SIGNAL_TYPE_EDP;
1941 		break;
1942 	}
1943 
1944 	case SIGNAL_TYPE_DISPLAY_PORT: {
1945 		sink_caps.transaction_type =
1946 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1947 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1948 		break;
1949 	}
1950 
1951 	default:
1952 		DC_ERROR("Invalid connector type! signal:%d\n",
1953 			link->connector_signal);
1954 		return;
1955 	}
1956 
1957 	sink_init_data.link = link;
1958 	sink_init_data.sink_signal = sink_caps.signal;
1959 
1960 	sink = dc_sink_create(&sink_init_data);
1961 	if (!sink) {
1962 		DC_ERROR("Failed to create sink!\n");
1963 		return;
1964 	}
1965 
1966 	/* dc_sink_create returns a new reference */
1967 	link->local_sink = sink;
1968 
1969 	edid_status = dm_helpers_read_local_edid(
1970 			link->ctx,
1971 			link,
1972 			sink);
1973 
1974 	if (edid_status != EDID_OK)
1975 		DC_ERROR("Failed to read EDID\n");
1976 
1977 }
1978 
1979 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1980 				     struct amdgpu_display_manager *dm)
1981 {
1982 	struct {
1983 		struct dc_surface_update surface_updates[MAX_SURFACES];
1984 		struct dc_plane_info plane_infos[MAX_SURFACES];
1985 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1986 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1987 		struct dc_stream_update stream_update;
1988 	} *bundle;
1989 	int k, m;
1990 
1991 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1992 
1993 	if (!bundle) {
1994 		dm_error("Failed to allocate update bundle\n");
1995 		goto cleanup;
1996 	}
1997 
1998 	for (k = 0; k < dc_state->stream_count; k++) {
1999 		bundle->stream_update.stream = dc_state->streams[k];
2000 
2001 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2002 			bundle->surface_updates[m].surface =
2003 				dc_state->stream_status[k].plane_states[m];
2004 			bundle->surface_updates[m].surface->force_full_update =
2005 				true;
2006 		}
2007 		dc_commit_updates_for_stream(
2008 			dm->dc, bundle->surface_updates,
2009 			dc_state->stream_status[k].plane_count,
2010 			dc_state->streams[k], &bundle->stream_update, dc_state);
2011 	}
2012 
2013 cleanup:
2014 	kfree(bundle);
2015 
2016 	return;
2017 }
2018 
2019 static void dm_set_dpms_off(struct dc_link *link)
2020 {
2021 	struct dc_stream_state *stream_state;
2022 	struct amdgpu_dm_connector *aconnector = link->priv;
2023 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2024 	struct dc_stream_update stream_update;
2025 	bool dpms_off = true;
2026 
2027 	memset(&stream_update, 0, sizeof(stream_update));
2028 	stream_update.dpms_off = &dpms_off;
2029 
2030 	mutex_lock(&adev->dm.dc_lock);
2031 	stream_state = dc_stream_find_from_link(link);
2032 
2033 	if (stream_state == NULL) {
2034 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2035 		mutex_unlock(&adev->dm.dc_lock);
2036 		return;
2037 	}
2038 
2039 	stream_update.stream = stream_state;
2040 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2041 				     stream_state, &stream_update,
2042 				     stream_state->ctx->dc->current_state);
2043 	mutex_unlock(&adev->dm.dc_lock);
2044 }
2045 
2046 static int dm_resume(void *handle)
2047 {
2048 	struct amdgpu_device *adev = handle;
2049 	struct drm_device *ddev = adev_to_drm(adev);
2050 	struct amdgpu_display_manager *dm = &adev->dm;
2051 	struct amdgpu_dm_connector *aconnector;
2052 	struct drm_connector *connector;
2053 	struct drm_connector_list_iter iter;
2054 	struct drm_crtc *crtc;
2055 	struct drm_crtc_state *new_crtc_state;
2056 	struct dm_crtc_state *dm_new_crtc_state;
2057 	struct drm_plane *plane;
2058 	struct drm_plane_state *new_plane_state;
2059 	struct dm_plane_state *dm_new_plane_state;
2060 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2061 	enum dc_connection_type new_connection_type = dc_connection_none;
2062 	struct dc_state *dc_state;
2063 	int i, r, j;
2064 
2065 	if (amdgpu_in_reset(adev)) {
2066 		dc_state = dm->cached_dc_state;
2067 
2068 		r = dm_dmub_hw_init(adev);
2069 		if (r)
2070 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2071 
2072 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2073 		dc_resume(dm->dc);
2074 
2075 		amdgpu_dm_irq_resume_early(adev);
2076 
2077 		for (i = 0; i < dc_state->stream_count; i++) {
2078 			dc_state->streams[i]->mode_changed = true;
2079 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2080 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2081 					= 0xffffffff;
2082 			}
2083 		}
2084 
2085 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2086 
2087 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2088 
2089 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2090 
2091 		dc_release_state(dm->cached_dc_state);
2092 		dm->cached_dc_state = NULL;
2093 
2094 		amdgpu_dm_irq_resume_late(adev);
2095 
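	/* Pairs with the mutex_lock() taken in dm_suspend() on the
	 * amdgpu_in_reset() path; the lock is held across the GPU reset.
	 */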
2096 		mutex_unlock(&dm->dc_lock);
2097 
2098 		return 0;
2099 	}
2100 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2101 	dc_release_state(dm_state->context);
2102 	dm_state->context = dc_create_state(dm->dc);
2103 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2104 	dc_resource_state_construct(dm->dc, dm_state->context);
2105 
2106 	/* Before powering on DC we need to re-initialize DMUB. */
2107 	r = dm_dmub_hw_init(adev);
2108 	if (r)
2109 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2110 
2111 	/* power on hardware */
2112 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2113 
2114 	/* program HPD filter */
2115 	dc_resume(dm->dc);
2116 
2117 	/*
2118 	 * Enable HPD Rx IRQ early. This should be done before setting the
2119 	 * mode, as short-pulse interrupts are used for MST.
2120 	 */
2121 	amdgpu_dm_irq_resume_early(adev);
2122 
2123 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2124 	s3_handle_mst(ddev, false);
2125 
2126 	/* Do detection */
2127 	drm_connector_list_iter_begin(ddev, &iter);
2128 	drm_for_each_connector_iter(connector, &iter) {
2129 		aconnector = to_amdgpu_dm_connector(connector);
2130 
2131 		/*
2132 		 * This is the case when traversing through already created
2133 		 * MST connectors; they should be skipped.
2134 		 */
2135 		if (aconnector->mst_port)
2136 			continue;
2137 
2138 		mutex_lock(&aconnector->hpd_lock);
2139 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2140 			DRM_ERROR("KMS: Failed to detect connector\n");
2141 
2142 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2143 			emulated_link_detect(aconnector->dc_link);
2144 		else
2145 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2146 
2147 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2148 			aconnector->fake_enable = false;
2149 
2150 		if (aconnector->dc_sink)
2151 			dc_sink_release(aconnector->dc_sink);
2152 		aconnector->dc_sink = NULL;
2153 		amdgpu_dm_update_connector_after_detect(aconnector);
2154 		mutex_unlock(&aconnector->hpd_lock);
2155 	}
2156 	drm_connector_list_iter_end(&iter);
2157 
2158 	/* Force mode set in atomic commit */
2159 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2160 		new_crtc_state->active_changed = true;
2161 
2162 	/*
2163 	 * atomic_check is expected to create the dc states. We need to release
2164 	 * them here, since they were duplicated as part of the suspend
2165 	 * procedure.
2166 	 */
2167 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2168 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2169 		if (dm_new_crtc_state->stream) {
2170 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2171 			dc_stream_release(dm_new_crtc_state->stream);
2172 			dm_new_crtc_state->stream = NULL;
2173 		}
2174 	}
2175 
2176 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2177 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2178 		if (dm_new_plane_state->dc_state) {
2179 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2180 			dc_plane_state_release(dm_new_plane_state->dc_state);
2181 			dm_new_plane_state->dc_state = NULL;
2182 		}
2183 	}
2184 
2185 	drm_atomic_helper_resume(ddev, dm->cached_state);
2186 
2187 	dm->cached_state = NULL;
2188 
2189 	amdgpu_dm_irq_resume_late(adev);
2190 
2191 	amdgpu_dm_smu_write_watermarks_table(adev);
2192 
2193 	return 0;
2194 }
2195 
2196 /**
2197  * DOC: DM Lifecycle
2198  *
2199  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2200  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2201  * the base driver's device list to be initialized and torn down accordingly.
2202  *
2203  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2204  */
2205 
2206 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2207 	.name = "dm",
2208 	.early_init = dm_early_init,
2209 	.late_init = dm_late_init,
2210 	.sw_init = dm_sw_init,
2211 	.sw_fini = dm_sw_fini,
2212 	.hw_init = dm_hw_init,
2213 	.hw_fini = dm_hw_fini,
2214 	.suspend = dm_suspend,
2215 	.resume = dm_resume,
2216 	.is_idle = dm_is_idle,
2217 	.wait_for_idle = dm_wait_for_idle,
2218 	.check_soft_reset = dm_check_soft_reset,
2219 	.soft_reset = dm_soft_reset,
2220 	.set_clockgating_state = dm_set_clockgating_state,
2221 	.set_powergating_state = dm_set_powergating_state,
2222 };
2223 
2224 const struct amdgpu_ip_block_version dm_ip_block =
2225 {
2226 	.type = AMD_IP_BLOCK_TYPE_DCE,
2227 	.major = 1,
2228 	.minor = 0,
2229 	.rev = 0,
2230 	.funcs = &amdgpu_dm_funcs,
2231 };
2232 
2233 
2234 /**
2235  * DOC: atomic
2236  *
2237  * *WIP*
2238  */
2239 
2240 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2241 	.fb_create = amdgpu_display_user_framebuffer_create,
2242 	.get_format_info = amd_get_format_info,
2243 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2244 	.atomic_check = amdgpu_dm_atomic_check,
2245 	.atomic_commit = drm_atomic_helper_commit,
2246 };
2247 
2248 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2249 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2250 };
2251 
2252 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2253 {
2254 	u32 max_cll, min_cll, max, min, q, r;
2255 	struct amdgpu_dm_backlight_caps *caps;
2256 	struct amdgpu_display_manager *dm;
2257 	struct drm_connector *conn_base;
2258 	struct amdgpu_device *adev;
2259 	struct dc_link *link = NULL;
2260 	static const u8 pre_computed_values[] = {
2261 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2262 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2263 
2264 	if (!aconnector || !aconnector->dc_link)
2265 		return;
2266 
2267 	link = aconnector->dc_link;
2268 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2269 		return;
2270 
2271 	conn_base = &aconnector->base;
2272 	adev = drm_to_adev(conn_base->dev);
2273 	dm = &adev->dm;
2274 	caps = &dm->backlight_caps;
2275 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2276 	caps->aux_support = false;
2277 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2278 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2279 
2280 	if (caps->ext_caps->bits.oled == 1 ||
2281 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2282 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2283 		caps->aux_support = true;
2284 
2285 	/* From the specification (CTA-861-G), for calculating the maximum
2286 	 * luminance we need to use:
2287 	 *	Luminance = 50*2**(CV/32)
2288 	 * where CV is a one-byte value.
2289 	 * Calculating this expression may need floating-point precision;
2290 	 * to avoid this complexity level, we take advantage of the fact that
2291 	 * CV is divided by a constant. From Euclid's division algorithm, we
2292 	 * know that CV can be written as: CV = 32*q + r. Next, we replace CV
2293 	 * in the Luminance expression and get 50*(2**q)*(2**(r/32)), hence we
2294 	 * just need to pre-compute the values of 50*2**(r/32). For
2295 	 * pre-computing the values we used the following Ruby line:
2296 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2297 	 * The results of the above expression can be verified in
2298 	 * pre_computed_values.
2299 	 */
2300 	q = max_cll >> 5;
2301 	r = max_cll % 32;
2302 	max = (1 << q) * pre_computed_values[r];
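	/* Worked example (illustrative numbers, not from the spec): for
	 * max_cll = 97, q = 97 >> 5 = 3 and r = 97 % 32 = 1, so
	 * max = (1 << 3) * pre_computed_values[1] = 8 * 51 = 408 nits,
	 * close to the exact value 50*2**(97/32) ~= 408.7.
	 */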
2303 
2304 	// min luminance: maxLum * (CV/255)^2 / 100
2305 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2306 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2307 
2308 	caps->aux_max_input_signal = max;
2309 	caps->aux_min_input_signal = min;
2310 }
2311 
2312 void amdgpu_dm_update_connector_after_detect(
2313 		struct amdgpu_dm_connector *aconnector)
2314 {
2315 	struct drm_connector *connector = &aconnector->base;
2316 	struct drm_device *dev = connector->dev;
2317 	struct dc_sink *sink;
2318 
2319 	/* MST handled by drm_mst framework */
2320 	if (aconnector->mst_mgr.mst_state)
2321 		return;
2322 
2323 	sink = aconnector->dc_link->local_sink;
2324 	if (sink)
2325 		dc_sink_retain(sink);
2326 
2327 	/*
2328 	 * EDID-managed connectors get their first update only in the mode_valid hook;
2329 	 * the connector sink is then set to either the fake or the physical sink,
2330 	 * depending on link status. Skip if already done during boot.
2331 	 */
2332 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2333 			&& aconnector->dc_em_sink) {
2334 
2335 		/*
2336 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2337 		 * fake a stream, because connector->sink is set to NULL on resume.
2338 		 */
2339 		mutex_lock(&dev->mode_config.mutex);
2340 
2341 		if (sink) {
2342 			if (aconnector->dc_sink) {
2343 				amdgpu_dm_update_freesync_caps(connector, NULL);
2344 				/*
2345 				 * The retain and release below bump up the sink
2346 				 * refcount: the link no longer points to it after
2347 				 * disconnect, so the next crtc-to-connector reshuffle
2348 				 * by the UMD would otherwise trigger an unwanted dc_sink release.
2349 				 */
2350 				dc_sink_release(aconnector->dc_sink);
2351 			}
2352 			aconnector->dc_sink = sink;
2353 			dc_sink_retain(aconnector->dc_sink);
2354 			amdgpu_dm_update_freesync_caps(connector,
2355 					aconnector->edid);
2356 		} else {
2357 			amdgpu_dm_update_freesync_caps(connector, NULL);
2358 			if (!aconnector->dc_sink) {
2359 				aconnector->dc_sink = aconnector->dc_em_sink;
2360 				dc_sink_retain(aconnector->dc_sink);
2361 			}
2362 		}
2363 
2364 		mutex_unlock(&dev->mode_config.mutex);
2365 
2366 		if (sink)
2367 			dc_sink_release(sink);
2368 		return;
2369 	}
2370 
2371 	/*
2372 	 * TODO: temporary guard while looking for a proper fix.
2373 	 * If this sink is an MST sink, we should not do anything.
2374 	 */
2375 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2376 		dc_sink_release(sink);
2377 		return;
2378 	}
2379 
2380 	if (aconnector->dc_sink == sink) {
2381 		/*
2382 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2383 		 * Do nothing!!
2384 		 */
2385 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2386 				aconnector->connector_id);
2387 		if (sink)
2388 			dc_sink_release(sink);
2389 		return;
2390 	}
2391 
2392 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2393 		aconnector->connector_id, aconnector->dc_sink, sink);
2394 
2395 	mutex_lock(&dev->mode_config.mutex);
2396 
2397 	/*
2398 	 * 1. Update status of the drm connector
2399 	 * 2. Send an event and let userspace tell us what to do
2400 	 */
2401 	if (sink) {
2402 		/*
2403 		 * TODO: check if we still need the S3 mode update workaround.
2404 		 * If yes, put it here.
2405 		 */
2406 		if (aconnector->dc_sink) {
2407 			amdgpu_dm_update_freesync_caps(connector, NULL);
2408 			dc_sink_release(aconnector->dc_sink);
2409 		}
2410 
2411 		aconnector->dc_sink = sink;
2412 		dc_sink_retain(aconnector->dc_sink);
2413 		if (sink->dc_edid.length == 0) {
2414 			aconnector->edid = NULL;
2415 			if (aconnector->dc_link->aux_mode) {
2416 				drm_dp_cec_unset_edid(
2417 					&aconnector->dm_dp_aux.aux);
2418 			}
2419 		} else {
2420 			aconnector->edid =
2421 				(struct edid *)sink->dc_edid.raw_edid;
2422 
2423 			drm_connector_update_edid_property(connector,
2424 							   aconnector->edid);
2425 			if (aconnector->dc_link->aux_mode)
2426 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2427 						    aconnector->edid);
2428 		}
2429 
2430 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2431 		update_connector_ext_caps(aconnector);
2432 	} else {
2433 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2434 		amdgpu_dm_update_freesync_caps(connector, NULL);
2435 		drm_connector_update_edid_property(connector, NULL);
2436 		aconnector->num_modes = 0;
2437 		dc_sink_release(aconnector->dc_sink);
2438 		aconnector->dc_sink = NULL;
2439 		aconnector->edid = NULL;
2440 #ifdef CONFIG_DRM_AMD_DC_HDCP
2441 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2442 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2443 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2444 #endif
2445 	}
2446 
2447 	mutex_unlock(&dev->mode_config.mutex);
2448 
2449 	update_subconnector_property(aconnector);
2450 
2451 	if (sink)
2452 		dc_sink_release(sink);
2453 }
2454 
2455 static void handle_hpd_irq(void *param)
2456 {
2457 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2458 	struct drm_connector *connector = &aconnector->base;
2459 	struct drm_device *dev = connector->dev;
2460 	enum dc_connection_type new_connection_type = dc_connection_none;
2461 #ifdef CONFIG_DRM_AMD_DC_HDCP
2462 	struct amdgpu_device *adev = drm_to_adev(dev);
2463 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2464 #endif
2465 
2466 	/*
2467 	 * In case of failure or MST there is no need to update the connector
2468 	 * status or notify the OS, since (in the MST case) MST does this in its own context.
2469 	 */
2470 	mutex_lock(&aconnector->hpd_lock);
2471 
2472 #ifdef CONFIG_DRM_AMD_DC_HDCP
2473 	if (adev->dm.hdcp_workqueue) {
2474 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2475 		dm_con_state->update_hdcp = true;
2476 	}
2477 #endif
2478 	if (aconnector->fake_enable)
2479 		aconnector->fake_enable = false;
2480 
2481 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2482 		DRM_ERROR("KMS: Failed to detect connector\n");
2483 
2484 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2485 		emulated_link_detect(aconnector->dc_link);
2486 
2487 
2488 		drm_modeset_lock_all(dev);
2489 		dm_restore_drm_connector_state(dev, connector);
2490 		drm_modeset_unlock_all(dev);
2491 
2492 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2493 			drm_kms_helper_hotplug_event(dev);
2494 
2495 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2496 		if (new_connection_type == dc_connection_none &&
2497 		    aconnector->dc_link->type == dc_connection_none)
2498 			dm_set_dpms_off(aconnector->dc_link);
2499 
2500 		amdgpu_dm_update_connector_after_detect(aconnector);
2501 
2502 		drm_modeset_lock_all(dev);
2503 		dm_restore_drm_connector_state(dev, connector);
2504 		drm_modeset_unlock_all(dev);
2505 
2506 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2507 			drm_kms_helper_hotplug_event(dev);
2508 	}
2509 	mutex_unlock(&aconnector->hpd_lock);
2510 
2511 }
2512 
2513 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2514 {
2515 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2516 	uint8_t dret;
2517 	bool new_irq_handled = false;
2518 	int dpcd_addr;
2519 	int dpcd_bytes_to_read;
2520 
2521 	const int max_process_count = 30;
2522 	int process_count = 0;
2523 
2524 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2525 
2526 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2527 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2528 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2529 		dpcd_addr = DP_SINK_COUNT;
2530 	} else {
2531 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2532 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2533 		dpcd_addr = DP_SINK_COUNT_ESI;
2534 	}
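	/* With these standard DPCD addresses, dpcd_bytes_to_read works out to
	 * 2 bytes (0x200-0x201) for pre-1.2 sinks and 4 bytes (0x2002-0x2005)
	 * for DP 1.2+ ESI-capable sinks.
	 */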
2535 
2536 	dret = drm_dp_dpcd_read(
2537 		&aconnector->dm_dp_aux.aux,
2538 		dpcd_addr,
2539 		esi,
2540 		dpcd_bytes_to_read);
2541 
2542 	while (dret == dpcd_bytes_to_read &&
2543 		process_count < max_process_count) {
2544 		uint8_t retry;
2545 		dret = 0;
2546 
2547 		process_count++;
2548 
2549 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2550 		/* handle HPD short pulse irq */
2551 		if (aconnector->mst_mgr.mst_state)
2552 			drm_dp_mst_hpd_irq(
2553 				&aconnector->mst_mgr,
2554 				esi,
2555 				&new_irq_handled);
2556 
2557 		if (new_irq_handled) {
2558 			/* ACK at DPCD to notify downstream */
2559 			const int ack_dpcd_bytes_to_write =
2560 				dpcd_bytes_to_read - 1;
2561 
2562 			for (retry = 0; retry < 3; retry++) {
2563 				uint8_t wret;
2564 
2565 				wret = drm_dp_dpcd_write(
2566 					&aconnector->dm_dp_aux.aux,
2567 					dpcd_addr + 1,
2568 					&esi[1],
2569 					ack_dpcd_bytes_to_write);
2570 				if (wret == ack_dpcd_bytes_to_write)
2571 					break;
2572 			}
2573 
2574 			/* check if there is new irq to be handled */
2575 			dret = drm_dp_dpcd_read(
2576 				&aconnector->dm_dp_aux.aux,
2577 				dpcd_addr,
2578 				esi,
2579 				dpcd_bytes_to_read);
2580 
2581 			new_irq_handled = false;
2582 		} else {
2583 			break;
2584 		}
2585 	}
2586 
2587 	if (process_count == max_process_count)
2588 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2589 }
2590 
2591 static void handle_hpd_rx_irq(void *param)
2592 {
2593 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2594 	struct drm_connector *connector = &aconnector->base;
2595 	struct drm_device *dev = connector->dev;
2596 	struct dc_link *dc_link = aconnector->dc_link;
2597 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2598 	bool result = false;
2599 	enum dc_connection_type new_connection_type = dc_connection_none;
2600 	struct amdgpu_device *adev = drm_to_adev(dev);
2601 	union hpd_irq_data hpd_irq_data;
2602 
2603 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2604 
2605 	/*
2606 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a
2607 	 * gpio conflict; after an i2c helper is implemented, this mutex
2608 	 * should be retired.
2609 	 */
2610 	if (dc_link->type != dc_connection_mst_branch)
2611 		mutex_lock(&aconnector->hpd_lock);
2612 
2613 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2614 
2615 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2616 		(dc_link->type == dc_connection_mst_branch)) {
2617 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2618 			result = true;
2619 			dm_handle_hpd_rx_irq(aconnector);
2620 			goto out;
2621 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2622 			result = false;
2623 			dm_handle_hpd_rx_irq(aconnector);
2624 			goto out;
2625 		}
2626 	}
2627 
2628 	mutex_lock(&adev->dm.dc_lock);
2629 #ifdef CONFIG_DRM_AMD_DC_HDCP
2630 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2631 #else
2632 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2633 #endif
2634 	mutex_unlock(&adev->dm.dc_lock);
2635 
2636 out:
2637 	if (result && !is_mst_root_connector) {
2638 		/* Downstream Port status changed. */
2639 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2640 			DRM_ERROR("KMS: Failed to detect connector\n");
2641 
2642 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2643 			emulated_link_detect(dc_link);
2644 
2645 			if (aconnector->fake_enable)
2646 				aconnector->fake_enable = false;
2647 
2648 			amdgpu_dm_update_connector_after_detect(aconnector);
2649 
2650 
2651 			drm_modeset_lock_all(dev);
2652 			dm_restore_drm_connector_state(dev, connector);
2653 			drm_modeset_unlock_all(dev);
2654 
2655 			drm_kms_helper_hotplug_event(dev);
2656 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2657 
2658 			if (aconnector->fake_enable)
2659 				aconnector->fake_enable = false;
2660 
2661 			amdgpu_dm_update_connector_after_detect(aconnector);
2662 
2663 
2664 			drm_modeset_lock_all(dev);
2665 			dm_restore_drm_connector_state(dev, connector);
2666 			drm_modeset_unlock_all(dev);
2667 
2668 			drm_kms_helper_hotplug_event(dev);
2669 		}
2670 	}
2671 #ifdef CONFIG_DRM_AMD_DC_HDCP
2672 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2673 		if (adev->dm.hdcp_workqueue)
2674 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2675 	}
2676 #endif
2677 
2678 	if (dc_link->type != dc_connection_mst_branch) {
2679 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2680 		mutex_unlock(&aconnector->hpd_lock);
2681 	}
2682 }
2683 
2684 static void register_hpd_handlers(struct amdgpu_device *adev)
2685 {
2686 	struct drm_device *dev = adev_to_drm(adev);
2687 	struct drm_connector *connector;
2688 	struct amdgpu_dm_connector *aconnector;
2689 	const struct dc_link *dc_link;
2690 	struct dc_interrupt_params int_params = {0};
2691 
2692 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2693 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2694 
2695 	list_for_each_entry(connector,
2696 			&dev->mode_config.connector_list, head) {
2697 
2698 		aconnector = to_amdgpu_dm_connector(connector);
2699 		dc_link = aconnector->dc_link;
2700 
2701 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2702 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2703 			int_params.irq_source = dc_link->irq_source_hpd;
2704 
2705 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2706 					handle_hpd_irq,
2707 					(void *) aconnector);
2708 		}
2709 
2710 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2711 
2712 			/* Also register for DP short pulse (hpd_rx). */
2713 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2714 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2715 
2716 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2717 					handle_hpd_rx_irq,
2718 					(void *) aconnector);
2719 		}
2720 	}
2721 }
2722 
2723 #if defined(CONFIG_DRM_AMD_DC_SI)
2724 /* Register IRQ sources and initialize IRQ callbacks */
2725 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2726 {
2727 	struct dc *dc = adev->dm.dc;
2728 	struct common_irq_params *c_irq_params;
2729 	struct dc_interrupt_params int_params = {0};
2730 	int r;
2731 	int i;
2732 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2733 
2734 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2735 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2736 
2737 	/*
2738 	 * Actions of amdgpu_irq_add_id():
2739 	 * 1. Register a set() function with base driver.
2740 	 *    Base driver will call set() function to enable/disable an
2741 	 *    interrupt in DC hardware.
2742 	 * 2. Register amdgpu_dm_irq_handler().
2743 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2744 	 *    coming from DC hardware.
2745 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2746 	 *    for acknowledging and handling. */
2747 
2748 	/* Use VBLANK interrupt */
2749 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2750 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2751 		if (r) {
2752 			DRM_ERROR("Failed to add crtc irq id!\n");
2753 			return r;
2754 		}
2755 
2756 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2757 		int_params.irq_source =
2758 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2759 
2760 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2761 
2762 		c_irq_params->adev = adev;
2763 		c_irq_params->irq_src = int_params.irq_source;
2764 
2765 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2766 				dm_crtc_high_irq, c_irq_params);
2767 	}
2768 
2769 	/* Use GRPH_PFLIP interrupt */
2770 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2771 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2772 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2773 		if (r) {
2774 			DRM_ERROR("Failed to add page flip irq id!\n");
2775 			return r;
2776 		}
2777 
2778 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2779 		int_params.irq_source =
2780 			dc_interrupt_to_irq_source(dc, i, 0);
2781 
2782 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2783 
2784 		c_irq_params->adev = adev;
2785 		c_irq_params->irq_src = int_params.irq_source;
2786 
2787 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2788 				dm_pflip_high_irq, c_irq_params);
2789 
2790 	}
2791 
2792 	/* HPD */
2793 	r = amdgpu_irq_add_id(adev, client_id,
2794 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2795 	if (r) {
2796 		DRM_ERROR("Failed to add hpd irq id!\n");
2797 		return r;
2798 	}
2799 
2800 	register_hpd_handlers(adev);
2801 
2802 	return 0;
2803 }
2804 #endif
2805 
2806 /* Register IRQ sources and initialize IRQ callbacks */
2807 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2808 {
2809 	struct dc *dc = adev->dm.dc;
2810 	struct common_irq_params *c_irq_params;
2811 	struct dc_interrupt_params int_params = {0};
2812 	int r;
2813 	int i;
2814 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2815 
2816 	if (adev->asic_type >= CHIP_VEGA10)
2817 		client_id = SOC15_IH_CLIENTID_DCE;
2818 
2819 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2820 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2821 
2822 	/*
2823 	 * Actions of amdgpu_irq_add_id():
2824 	 * 1. Register a set() function with base driver.
2825 	 *    Base driver will call set() function to enable/disable an
2826 	 *    interrupt in DC hardware.
2827 	 * 2. Register amdgpu_dm_irq_handler().
2828 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2829 	 *    coming from DC hardware.
2830 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2831 	 *    for acknowledging and handling. */
2832 
2833 	/* Use VBLANK interrupt */
2834 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2835 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2836 		if (r) {
2837 			DRM_ERROR("Failed to add crtc irq id!\n");
2838 			return r;
2839 		}
2840 
2841 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2842 		int_params.irq_source =
2843 			dc_interrupt_to_irq_source(dc, i, 0);
2844 
2845 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2846 
2847 		c_irq_params->adev = adev;
2848 		c_irq_params->irq_src = int_params.irq_source;
2849 
2850 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2851 				dm_crtc_high_irq, c_irq_params);
2852 	}
2853 
2854 	/* Use VUPDATE interrupt */
2855 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2856 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2857 		if (r) {
2858 			DRM_ERROR("Failed to add vupdate irq id!\n");
2859 			return r;
2860 		}
2861 
2862 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2863 		int_params.irq_source =
2864 			dc_interrupt_to_irq_source(dc, i, 0);
2865 
2866 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2867 
2868 		c_irq_params->adev = adev;
2869 		c_irq_params->irq_src = int_params.irq_source;
2870 
2871 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2872 				dm_vupdate_high_irq, c_irq_params);
2873 	}
2874 
2875 	/* Use GRPH_PFLIP interrupt */
2876 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2877 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2878 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2879 		if (r) {
2880 			DRM_ERROR("Failed to add page flip irq id!\n");
2881 			return r;
2882 		}
2883 
2884 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2885 		int_params.irq_source =
2886 			dc_interrupt_to_irq_source(dc, i, 0);
2887 
2888 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2889 
2890 		c_irq_params->adev = adev;
2891 		c_irq_params->irq_src = int_params.irq_source;
2892 
2893 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2894 				dm_pflip_high_irq, c_irq_params);
2895 
2896 	}
2897 
2898 	/* HPD */
2899 	r = amdgpu_irq_add_id(adev, client_id,
2900 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2901 	if (r) {
2902 		DRM_ERROR("Failed to add hpd irq id!\n");
2903 		return r;
2904 	}
2905 
2906 	register_hpd_handlers(adev);
2907 
2908 	return 0;
2909 }
2910 
2911 #if defined(CONFIG_DRM_AMD_DC_DCN)
2912 /* Register IRQ sources and initialize IRQ callbacks */
2913 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2914 {
2915 	struct dc *dc = adev->dm.dc;
2916 	struct common_irq_params *c_irq_params;
2917 	struct dc_interrupt_params int_params = {0};
2918 	int r;
2919 	int i;
2920 
2921 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2922 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2923 
2924 	/*
2925 	 * Actions of amdgpu_irq_add_id():
2926 	 * 1. Register a set() function with base driver.
2927 	 *    Base driver will call set() function to enable/disable an
2928 	 *    interrupt in DC hardware.
2929 	 * 2. Register amdgpu_dm_irq_handler().
2930 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2931 	 *    coming from DC hardware.
2932 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2933 	 *    for acknowledging and handling.
2934 	 */
2935 
2936 	/* Use VSTARTUP interrupt */
2937 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2938 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2939 			i++) {
2940 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2941 
2942 		if (r) {
2943 			DRM_ERROR("Failed to add crtc irq id!\n");
2944 			return r;
2945 		}
2946 
2947 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2948 		int_params.irq_source =
2949 			dc_interrupt_to_irq_source(dc, i, 0);
2950 
2951 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2952 
2953 		c_irq_params->adev = adev;
2954 		c_irq_params->irq_src = int_params.irq_source;
2955 
2956 		amdgpu_dm_irq_register_interrupt(
2957 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2958 	}
2959 
2960 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2961 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2962 	 * to trigger at end of each vblank, regardless of state of the lock,
2963 	 * matching DCE behaviour.
2964 	 */
2965 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2966 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2967 	     i++) {
2968 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2969 
2970 		if (r) {
2971 			DRM_ERROR("Failed to add vupdate irq id!\n");
2972 			return r;
2973 		}
2974 
2975 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2976 		int_params.irq_source =
2977 			dc_interrupt_to_irq_source(dc, i, 0);
2978 
2979 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2980 
2981 		c_irq_params->adev = adev;
2982 		c_irq_params->irq_src = int_params.irq_source;
2983 
2984 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2985 				dm_vupdate_high_irq, c_irq_params);
2986 	}
2987 
2988 	/* Use GRPH_PFLIP interrupt */
2989 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2990 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2991 			i++) {
2992 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2993 		if (r) {
2994 			DRM_ERROR("Failed to add page flip irq id!\n");
2995 			return r;
2996 		}
2997 
2998 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2999 		int_params.irq_source =
3000 			dc_interrupt_to_irq_source(dc, i, 0);
3001 
3002 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3003 
3004 		c_irq_params->adev = adev;
3005 		c_irq_params->irq_src = int_params.irq_source;
3006 
3007 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3008 				dm_pflip_high_irq, c_irq_params);
3009 
3010 	}
3011 
3012 	/* HPD */
3013 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3014 			&adev->hpd_irq);
3015 	if (r) {
3016 		DRM_ERROR("Failed to add hpd irq id!\n");
3017 		return r;
3018 	}
3019 
3020 	register_hpd_handlers(adev);
3021 
3022 	return 0;
3023 }
3024 #endif
3025 
3026 /*
3027  * Acquires the lock for the atomic state object and returns
3028  * the new atomic state.
3029  *
3030  * This should only be called during atomic check.
3031  */
3032 static int dm_atomic_get_state(struct drm_atomic_state *state,
3033 			       struct dm_atomic_state **dm_state)
3034 {
3035 	struct drm_device *dev = state->dev;
3036 	struct amdgpu_device *adev = drm_to_adev(dev);
3037 	struct amdgpu_display_manager *dm = &adev->dm;
3038 	struct drm_private_state *priv_state;
3039 
3040 	if (*dm_state)
3041 		return 0;
3042 
3043 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3044 	if (IS_ERR(priv_state))
3045 		return PTR_ERR(priv_state);
3046 
3047 	*dm_state = to_dm_atomic_state(priv_state);
3048 
3049 	return 0;
3050 }
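/* Typical usage from atomic check (an illustrative sketch, not code from
 * this file): callers keep a local dm_state pointer initialized to NULL and
 * look the state up lazily the first time they need the global DC context:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context points at the dc_state duplicated for this
 * atomic transaction.
 */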
3051 
3052 static struct dm_atomic_state *
3053 dm_atomic_get_new_state(struct drm_atomic_state *state)
3054 {
3055 	struct drm_device *dev = state->dev;
3056 	struct amdgpu_device *adev = drm_to_adev(dev);
3057 	struct amdgpu_display_manager *dm = &adev->dm;
3058 	struct drm_private_obj *obj;
3059 	struct drm_private_state *new_obj_state;
3060 	int i;
3061 
3062 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3063 		if (obj->funcs == dm->atomic_obj.funcs)
3064 			return to_dm_atomic_state(new_obj_state);
3065 	}
3066 
3067 	return NULL;
3068 }
3069 
3070 static struct drm_private_state *
3071 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3072 {
3073 	struct dm_atomic_state *old_state, *new_state;
3074 
3075 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3076 	if (!new_state)
3077 		return NULL;
3078 
3079 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3080 
3081 	old_state = to_dm_atomic_state(obj->state);
3082 
3083 	if (old_state && old_state->context)
3084 		new_state->context = dc_copy_state(old_state->context);
3085 
3086 	if (!new_state->context) {
3087 		kfree(new_state);
3088 		return NULL;
3089 	}
3090 
3091 	return &new_state->base;
3092 }
3093 
3094 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3095 				    struct drm_private_state *state)
3096 {
3097 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3098 
3099 	if (dm_state && dm_state->context)
3100 		dc_release_state(dm_state->context);
3101 
3102 	kfree(dm_state);
3103 }
3104 
3105 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3106 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3107 	.atomic_destroy_state = dm_atomic_destroy_state,
3108 };
3109 
3110 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3111 {
3112 	struct dm_atomic_state *state;
3113 	int r;
3114 
3115 	adev->mode_info.mode_config_initialized = true;
3116 
3117 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3118 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3119 
3120 	adev_to_drm(adev)->mode_config.max_width = 16384;
3121 	adev_to_drm(adev)->mode_config.max_height = 16384;
3122 
3123 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3124 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3125 	/* indicates support for immediate flip */
3126 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3127 
3128 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3129 
3130 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3131 	if (!state)
3132 		return -ENOMEM;
3133 
3134 	state->context = dc_create_state(adev->dm.dc);
3135 	if (!state->context) {
3136 		kfree(state);
3137 		return -ENOMEM;
3138 	}
3139 
3140 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3141 
3142 	drm_atomic_private_obj_init(adev_to_drm(adev),
3143 				    &adev->dm.atomic_obj,
3144 				    &state->base,
3145 				    &dm_atomic_state_funcs);
3146 
3147 	r = amdgpu_display_modeset_create_props(adev);
3148 	if (r) {
3149 		dc_release_state(state->context);
3150 		kfree(state);
3151 		return r;
3152 	}
3153 
3154 	r = amdgpu_dm_audio_init(adev);
3155 	if (r) {
3156 		dc_release_state(state->context);
3157 		kfree(state);
3158 		return r;
3159 	}
3160 
3161 	return 0;
3162 }
3163 
3164 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3165 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3166 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3167 
3168 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3169 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3170 
3171 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3172 {
3173 #if defined(CONFIG_ACPI)
3174 	struct amdgpu_dm_backlight_caps caps;
3175 
3176 	memset(&caps, 0, sizeof(caps));
3177 
3178 	if (dm->backlight_caps.caps_valid)
3179 		return;
3180 
3181 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3182 	if (caps.caps_valid) {
3183 		dm->backlight_caps.caps_valid = true;
3184 		if (caps.aux_support)
3185 			return;
3186 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3187 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3188 	} else {
3189 		dm->backlight_caps.min_input_signal =
3190 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3191 		dm->backlight_caps.max_input_signal =
3192 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3193 	}
3194 #else
3195 	if (dm->backlight_caps.aux_support)
3196 		return;
3197 
3198 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3199 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3200 #endif
3201 }
3202 
3203 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3204 {
3205 	bool rc;
3206 
3207 	if (!link)
3208 		return 1;
3209 
3210 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3211 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3212 
3213 	return rc ? 0 : 1;
3214 }
3215 
3216 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3217 				unsigned *min, unsigned *max)
3218 {
3219 	if (!caps)
3220 		return 0;
3221 
3222 	if (caps->aux_support) {
3223 		// Firmware limits are in nits, DC API wants millinits.
3224 		*max = 1000 * caps->aux_max_input_signal;
3225 		*min = 1000 * caps->aux_min_input_signal;
3226 	} else {
3227 		// Firmware limits are 8-bit, PWM control is 16-bit.
3228 		*max = 0x101 * caps->max_input_signal;
3229 		*min = 0x101 * caps->min_input_signal;
3230 	}
3231 	return 1;
3232 }
3233 
3234 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3235 					uint32_t brightness)
3236 {
3237 	unsigned min, max;
3238 
3239 	if (!get_brightness_range(caps, &min, &max))
3240 		return brightness;
3241 
3242 	// Rescale 0..255 to min..max
3243 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3244 				       AMDGPU_MAX_BL_LEVEL);
3245 }
3246 
3247 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3248 				      uint32_t brightness)
3249 {
3250 	unsigned min, max;
3251 
3252 	if (!get_brightness_range(caps, &min, &max))
3253 		return brightness;
3254 
3255 	if (brightness < min)
3256 		return 0;
3257 	// Rescale min..max to 0..255
3258 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3259 				 max - min);
3260 }
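/* Worked example (illustrative numbers, assuming the PWM defaults above):
 * with min_input_signal = 12 and max_input_signal = 255, the PWM range is
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535. A user brightness
 * of 128 then maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34433, and
 * converting 34433 back yields 255 * (34433 - 3084) / (65535 - 3084) ~= 128.
 */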
3261 
3262 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3263 {
3264 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3265 	struct amdgpu_dm_backlight_caps caps;
3266 	struct dc_link *link = NULL;
3267 	u32 brightness;
3268 	bool rc;
3269 
3270 	amdgpu_dm_update_backlight_caps(dm);
3271 	caps = dm->backlight_caps;
3272 
3273 	link = (struct dc_link *)dm->backlight_link;
3274 
3275 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3276 	// Change brightness based on AUX property
3277 	if (caps.aux_support)
3278 		return set_backlight_via_aux(link, brightness);
3279 
3280 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3281 
3282 	return rc ? 0 : 1;
3283 }
3284 
3285 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3286 {
3287 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3288 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3289 
3290 	if (ret == DC_ERROR_UNEXPECTED)
3291 		return bd->props.brightness;
3292 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3293 }
3294 
3295 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3296 	.options = BL_CORE_SUSPENDRESUME,
3297 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3298 	.update_status	= amdgpu_dm_backlight_update_status,
3299 };
3300 
3301 static void
3302 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3303 {
3304 	char bl_name[16];
3305 	struct backlight_properties props = { 0 };
3306 
3307 	amdgpu_dm_update_backlight_caps(dm);
3308 
3309 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3310 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3311 	props.type = BACKLIGHT_RAW;
3312 
3313 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3314 		 adev_to_drm(dm->adev)->primary->index);
3315 
3316 	dm->backlight_dev = backlight_device_register(bl_name,
3317 						      adev_to_drm(dm->adev)->dev,
3318 						      dm,
3319 						      &amdgpu_dm_backlight_ops,
3320 						      &props);
3321 
3322 	if (IS_ERR(dm->backlight_dev))
3323 		DRM_ERROR("DM: Backlight registration failed!\n");
3324 	else
3325 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3326 }
3327 
3328 #endif
3329 
3330 static int initialize_plane(struct amdgpu_display_manager *dm,
3331 			    struct amdgpu_mode_info *mode_info, int plane_id,
3332 			    enum drm_plane_type plane_type,
3333 			    const struct dc_plane_cap *plane_cap)
3334 {
3335 	struct drm_plane *plane;
3336 	unsigned long possible_crtcs;
3337 	int ret = 0;
3338 
3339 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3340 	if (!plane) {
3341 		DRM_ERROR("KMS: Failed to allocate plane\n");
3342 		return -ENOMEM;
3343 	}
3344 	plane->type = plane_type;
3345 
3346 	/*
3347 	 * HACK: IGT tests expect that the primary plane for a CRTC
3348 	 * can only have one possible CRTC. Only expose support for
3349 	 * any CRTC if the plane is not going to be used as a primary
3350 	 * plane for a CRTC - like overlay or underlay planes.
3351 	 */
3352 	possible_crtcs = 1 << plane_id;
3353 	if (plane_id >= dm->dc->caps.max_streams)
3354 		possible_crtcs = 0xff;
3355 
3356 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3357 
3358 	if (ret) {
3359 		DRM_ERROR("KMS: Failed to initialize plane\n");
3360 		kfree(plane);
3361 		return ret;
3362 	}
3363 
3364 	if (mode_info)
3365 		mode_info->planes[plane_id] = plane;
3366 
3367 	return ret;
3368 }
3369 
3370 
3371 static void register_backlight_device(struct amdgpu_display_manager *dm,
3372 				      struct dc_link *link)
3373 {
3374 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3375 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3376 
3377 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3378 	    link->type != dc_connection_none) {
3379 		/*
3380 		 * Even if registration failed, we should continue with
3381 		 * DM initialization because not having backlight control
3382 		 * is better than a black screen.
3383 		 */
3384 		amdgpu_dm_register_backlight_device(dm);
3385 
3386 		if (dm->backlight_dev)
3387 			dm->backlight_link = link;
3388 	}
3389 #endif
3390 }
3391 
3392 
3393 /*
3394  * In this architecture, the association
3395  * connector -> encoder -> crtc
3396  * is not really required. The crtc and connector will hold the
3397  * display_index as an abstraction to use with the DAL component.
3398  *
3399  * Returns 0 on success
3400  */
3401 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3402 {
3403 	struct amdgpu_display_manager *dm = &adev->dm;
3404 	int32_t i;
3405 	struct amdgpu_dm_connector *aconnector = NULL;
3406 	struct amdgpu_encoder *aencoder = NULL;
3407 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3408 	uint32_t link_cnt;
3409 	int32_t primary_planes;
3410 	enum dc_connection_type new_connection_type = dc_connection_none;
3411 	const struct dc_plane_cap *plane;
3412 
3413 	dm->display_indexes_num = dm->dc->caps.max_streams;
3414 	/* Update the actual number of crtcs used */
3415 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3416 
3417 	link_cnt = dm->dc->caps.max_links;
3418 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3419 		DRM_ERROR("DM: Failed to initialize mode config\n");
3420 		return -EINVAL;
3421 	}
3422 
3423 	/* There is one primary plane per CRTC */
3424 	primary_planes = dm->dc->caps.max_streams;
3425 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3426 
3427 	/*
3428 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3429 	 * Order is reversed to match iteration order in atomic check.
3430 	 */
3431 	for (i = (primary_planes - 1); i >= 0; i--) {
3432 		plane = &dm->dc->caps.planes[i];
3433 
3434 		if (initialize_plane(dm, mode_info, i,
3435 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3436 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3437 			goto fail;
3438 		}
3439 	}
3440 
3441 	/*
3442 	 * Initialize overlay planes, index starting after primary planes.
3443 	 * These planes have a higher DRM index than the primary planes since
3444 	 * they should be considered as having a higher z-order.
3445 	 * Order is reversed to match iteration order in atomic check.
3446 	 *
3447 	 * Only support DCN for now, and only expose one so we don't encourage
3448 	 * userspace to use up all the pipes.
3449 	 */
3450 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3451 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3452 
3453 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3454 			continue;
3455 
3456 		if (!plane->blends_with_above || !plane->blends_with_below)
3457 			continue;
3458 
3459 		if (!plane->pixel_format_support.argb8888)
3460 			continue;
3461 
3462 		if (initialize_plane(dm, NULL, primary_planes + i,
3463 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3464 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3465 			goto fail;
3466 		}
3467 
3468 		/* Only create one overlay plane. */
3469 		break;
3470 	}
3471 
3472 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3473 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3474 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3475 			goto fail;
3476 		}
3477 
3478 	/* loops over all connectors on the board */
3479 	for (i = 0; i < link_cnt; i++) {
3480 		struct dc_link *link = NULL;
3481 
3482 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3483 			DRM_ERROR(
3484 				"KMS: Cannot support more than %d display indexes\n",
3485 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3486 			continue;
3487 		}
3488 
3489 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3490 		if (!aconnector)
3491 			goto fail;
3492 
3493 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3494 		if (!aencoder)
3495 			goto fail;
3496 
3497 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3498 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3499 			goto fail;
3500 		}
3501 
3502 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3503 			DRM_ERROR("KMS: Failed to initialize connector\n");
3504 			goto fail;
3505 		}
3506 
3507 		link = dc_get_link_at_index(dm->dc, i);
3508 
3509 		if (!dc_link_detect_sink(link, &new_connection_type))
3510 			DRM_ERROR("KMS: Failed to detect connector\n");
3511 
3512 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3513 			emulated_link_detect(link);
3514 			amdgpu_dm_update_connector_after_detect(aconnector);
3515 
3516 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3517 			amdgpu_dm_update_connector_after_detect(aconnector);
3518 			register_backlight_device(dm, link);
3519 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3520 				amdgpu_dm_set_psr_caps(link);
3521 		}
3522 
3524 	}
3525 
3526 	/* Software is initialized. Now we can register interrupt handlers. */
3527 	switch (adev->asic_type) {
3528 #if defined(CONFIG_DRM_AMD_DC_SI)
3529 	case CHIP_TAHITI:
3530 	case CHIP_PITCAIRN:
3531 	case CHIP_VERDE:
3532 	case CHIP_OLAND:
3533 		if (dce60_register_irq_handlers(dm->adev)) {
3534 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3535 			goto fail;
3536 		}
3537 		break;
3538 #endif
3539 	case CHIP_BONAIRE:
3540 	case CHIP_HAWAII:
3541 	case CHIP_KAVERI:
3542 	case CHIP_KABINI:
3543 	case CHIP_MULLINS:
3544 	case CHIP_TONGA:
3545 	case CHIP_FIJI:
3546 	case CHIP_CARRIZO:
3547 	case CHIP_STONEY:
3548 	case CHIP_POLARIS11:
3549 	case CHIP_POLARIS10:
3550 	case CHIP_POLARIS12:
3551 	case CHIP_VEGAM:
3552 	case CHIP_VEGA10:
3553 	case CHIP_VEGA12:
3554 	case CHIP_VEGA20:
3555 		if (dce110_register_irq_handlers(dm->adev)) {
3556 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3557 			goto fail;
3558 		}
3559 		break;
3560 #if defined(CONFIG_DRM_AMD_DC_DCN)
3561 	case CHIP_RAVEN:
3562 	case CHIP_NAVI12:
3563 	case CHIP_NAVI10:
3564 	case CHIP_NAVI14:
3565 	case CHIP_RENOIR:
3566 	case CHIP_SIENNA_CICHLID:
3567 	case CHIP_NAVY_FLOUNDER:
3568 	case CHIP_DIMGREY_CAVEFISH:
3569 	case CHIP_VANGOGH:
3570 		if (dcn10_register_irq_handlers(dm->adev)) {
3571 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3572 			goto fail;
3573 		}
3574 		break;
3575 #endif
3576 	default:
3577 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3578 		goto fail;
3579 	}
3580 
3581 	return 0;
3582 fail:
3583 	kfree(aencoder);
3584 	kfree(aconnector);
3585 
3586 	return -EINVAL;
3587 }
3588 
3589 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3590 {
3591 	drm_mode_config_cleanup(dm->ddev);
3592 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3594 }
3595 
3596 /******************************************************************************
3597  * amdgpu_display_funcs functions
3598  *****************************************************************************/
3599 
3600 /*
3601  * dm_bandwidth_update - program display watermarks
3602  *
3603  * @adev: amdgpu_device pointer
3604  *
3605  * Calculate and program the display watermarks and line buffer allocation.
3606  */
3607 static void dm_bandwidth_update(struct amdgpu_device *adev)
3608 {
3609 	/* TODO: implement later */
3610 }
3611 
3612 static const struct amdgpu_display_funcs dm_display_funcs = {
3613 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3614 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3615 	.backlight_set_level = NULL, /* never called for DC */
3616 	.backlight_get_level = NULL, /* never called for DC */
3617 	.hpd_sense = NULL, /* called unconditionally */
3618 	.hpd_set_polarity = NULL, /* called unconditionally */
3619 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3620 	.page_flip_get_scanoutpos =
3621 		dm_crtc_get_scanoutpos, /* called unconditionally */
3622 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3623 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3624 };
3625 
3626 #if defined(CONFIG_DEBUG_KERNEL_DC)
3627 
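/*
 * Debug-only sysfs hook: writing a non-zero value triggers dm_resume() plus a
 * hotplug event, while writing zero triggers dm_suspend().
 */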
3628 static ssize_t s3_debug_store(struct device *device,
3629 			      struct device_attribute *attr,
3630 			      const char *buf,
3631 			      size_t count)
3632 {
3633 	int ret;
3634 	int s3_state;
3635 	struct drm_device *drm_dev = dev_get_drvdata(device);
3636 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3637 
3638 	ret = kstrtoint(buf, 0, &s3_state);
3639 
3640 	if (ret == 0) {
3641 		if (s3_state) {
3642 			dm_resume(adev);
3643 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3644 		} else
3645 			dm_suspend(adev);
3646 	}
3647 
3648 	return ret == 0 ? count : ret;
3649 }
3650 
3651 DEVICE_ATTR_WO(s3_debug);
3652 
3653 #endif
3654 
3655 static int dm_early_init(void *handle)
3656 {
3657 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3658 
3659 	switch (adev->asic_type) {
3660 #if defined(CONFIG_DRM_AMD_DC_SI)
3661 	case CHIP_TAHITI:
3662 	case CHIP_PITCAIRN:
3663 	case CHIP_VERDE:
3664 		adev->mode_info.num_crtc = 6;
3665 		adev->mode_info.num_hpd = 6;
3666 		adev->mode_info.num_dig = 6;
3667 		break;
3668 	case CHIP_OLAND:
3669 		adev->mode_info.num_crtc = 2;
3670 		adev->mode_info.num_hpd = 2;
3671 		adev->mode_info.num_dig = 2;
3672 		break;
3673 #endif
3674 	case CHIP_BONAIRE:
3675 	case CHIP_HAWAII:
3676 		adev->mode_info.num_crtc = 6;
3677 		adev->mode_info.num_hpd = 6;
3678 		adev->mode_info.num_dig = 6;
3679 		break;
3680 	case CHIP_KAVERI:
3681 		adev->mode_info.num_crtc = 4;
3682 		adev->mode_info.num_hpd = 6;
3683 		adev->mode_info.num_dig = 7;
3684 		break;
3685 	case CHIP_KABINI:
3686 	case CHIP_MULLINS:
3687 		adev->mode_info.num_crtc = 2;
3688 		adev->mode_info.num_hpd = 6;
3689 		adev->mode_info.num_dig = 6;
3690 		break;
3691 	case CHIP_FIJI:
3692 	case CHIP_TONGA:
3693 		adev->mode_info.num_crtc = 6;
3694 		adev->mode_info.num_hpd = 6;
3695 		adev->mode_info.num_dig = 7;
3696 		break;
3697 	case CHIP_CARRIZO:
3698 		adev->mode_info.num_crtc = 3;
3699 		adev->mode_info.num_hpd = 6;
3700 		adev->mode_info.num_dig = 9;
3701 		break;
3702 	case CHIP_STONEY:
3703 		adev->mode_info.num_crtc = 2;
3704 		adev->mode_info.num_hpd = 6;
3705 		adev->mode_info.num_dig = 9;
3706 		break;
3707 	case CHIP_POLARIS11:
3708 	case CHIP_POLARIS12:
3709 		adev->mode_info.num_crtc = 5;
3710 		adev->mode_info.num_hpd = 5;
3711 		adev->mode_info.num_dig = 5;
3712 		break;
3713 	case CHIP_POLARIS10:
3714 	case CHIP_VEGAM:
3715 		adev->mode_info.num_crtc = 6;
3716 		adev->mode_info.num_hpd = 6;
3717 		adev->mode_info.num_dig = 6;
3718 		break;
3719 	case CHIP_VEGA10:
3720 	case CHIP_VEGA12:
3721 	case CHIP_VEGA20:
3722 		adev->mode_info.num_crtc = 6;
3723 		adev->mode_info.num_hpd = 6;
3724 		adev->mode_info.num_dig = 6;
3725 		break;
3726 #if defined(CONFIG_DRM_AMD_DC_DCN)
3727 	case CHIP_RAVEN:
3728 	case CHIP_RENOIR:
3729 	case CHIP_VANGOGH:
3730 		adev->mode_info.num_crtc = 4;
3731 		adev->mode_info.num_hpd = 4;
3732 		adev->mode_info.num_dig = 4;
3733 		break;
3734 	case CHIP_NAVI10:
3735 	case CHIP_NAVI12:
3736 	case CHIP_SIENNA_CICHLID:
3737 	case CHIP_NAVY_FLOUNDER:
3738 		adev->mode_info.num_crtc = 6;
3739 		adev->mode_info.num_hpd = 6;
3740 		adev->mode_info.num_dig = 6;
3741 		break;
3742 	case CHIP_NAVI14:
3743 	case CHIP_DIMGREY_CAVEFISH:
3744 		adev->mode_info.num_crtc = 5;
3745 		adev->mode_info.num_hpd = 5;
3746 		adev->mode_info.num_dig = 5;
3747 		break;
3748 #endif
3749 	default:
3750 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3751 		return -EINVAL;
3752 	}
3753 
3754 	amdgpu_dm_set_irq_funcs(adev);
3755 
3756 	if (adev->mode_info.funcs == NULL)
3757 		adev->mode_info.funcs = &dm_display_funcs;
3758 
3759 	/*
3760 	 * Note: Do NOT change adev->audio_endpt_rreg and
3761 	 * adev->audio_endpt_wreg because they are initialised in
3762 	 * amdgpu_device_init()
3763 	 */
3764 #if defined(CONFIG_DEBUG_KERNEL_DC)
3765 	device_create_file(
3766 		adev_to_drm(adev)->dev,
3767 		&dev_attr_s3_debug);
3768 #endif
3769 
3770 	return 0;
3771 }
3772 
3773 static bool modeset_required(struct drm_crtc_state *crtc_state,
3774 			     struct dc_stream_state *new_stream,
3775 			     struct dc_stream_state *old_stream)
3776 {
3777 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3778 }
3779 
3780 static bool modereset_required(struct drm_crtc_state *crtc_state)
3781 {
3782 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3783 }
3784 
3785 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3786 {
3787 	drm_encoder_cleanup(encoder);
3788 	kfree(encoder);
3789 }
3790 
3791 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3792 	.destroy = amdgpu_dm_encoder_destroy,
3793 };
3794 
3795 
3796 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3797 					 struct drm_framebuffer *fb,
3798 					 int *min_downscale, int *max_upscale)
3799 {
3800 	struct amdgpu_device *adev = drm_to_adev(dev);
3801 	struct dc *dc = adev->dm.dc;
3802 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3803 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3804 
3805 	switch (fb->format->format) {
3806 	case DRM_FORMAT_P010:
3807 	case DRM_FORMAT_NV12:
3808 	case DRM_FORMAT_NV21:
3809 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3810 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3811 		break;
3812 
3813 	case DRM_FORMAT_XRGB16161616F:
3814 	case DRM_FORMAT_ARGB16161616F:
3815 	case DRM_FORMAT_XBGR16161616F:
3816 	case DRM_FORMAT_ABGR16161616F:
3817 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3818 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3819 		break;
3820 
3821 	default:
3822 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3823 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3824 		break;
3825 	}
3826 
3827 	/*
3828 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3829 	 * scaling factor of 1.0 == 1000 units.
3830 	 */
3831 	if (*max_upscale == 1)
3832 		*max_upscale = 1000;
3833 
3834 	if (*min_downscale == 1)
3835 		*min_downscale = 1000;
3836 }
3837 
3838 
3839 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3840 				struct dc_scaling_info *scaling_info)
3841 {
3842 	int scale_w, scale_h, min_downscale, max_upscale;
3843 
3844 	memset(scaling_info, 0, sizeof(*scaling_info));
3845 
3846 	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
3847 	scaling_info->src_rect.x = state->src_x >> 16;
3848 	scaling_info->src_rect.y = state->src_y >> 16;
3849 
3850 	scaling_info->src_rect.width = state->src_w >> 16;
3851 	if (scaling_info->src_rect.width == 0)
3852 		return -EINVAL;
3853 
3854 	scaling_info->src_rect.height = state->src_h >> 16;
3855 	if (scaling_info->src_rect.height == 0)
3856 		return -EINVAL;
3857 
3858 	scaling_info->dst_rect.x = state->crtc_x;
3859 	scaling_info->dst_rect.y = state->crtc_y;
3860 
3861 	if (state->crtc_w == 0)
3862 		return -EINVAL;
3863 
3864 	scaling_info->dst_rect.width = state->crtc_w;
3865 
3866 	if (state->crtc_h == 0)
3867 		return -EINVAL;
3868 
3869 	scaling_info->dst_rect.height = state->crtc_h;
3870 
3871 	/* DRM doesn't specify clipping on destination output. */
3872 	scaling_info->clip_rect = scaling_info->dst_rect;
3873 
3874 	/* Validate scaling per-format with DC plane caps */
3875 	if (state->plane && state->plane->dev && state->fb) {
3876 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3877 					     &min_downscale, &max_upscale);
3878 	} else {
3879 		min_downscale = 250;
3880 		max_upscale = 16000;
3881 	}
3882 
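	/*
	 * Scaling limits are in units of 1/1000th of the source size, so a
	 * min_downscale of 250 allows shrinking to 1/4 and a max_upscale of
	 * 16000 allows growing to 16x.
	 */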
3883 	scale_w = scaling_info->dst_rect.width * 1000 /
3884 		  scaling_info->src_rect.width;
3885 
3886 	if (scale_w < min_downscale || scale_w > max_upscale)
3887 		return -EINVAL;
3888 
3889 	scale_h = scaling_info->dst_rect.height * 1000 /
3890 		  scaling_info->src_rect.height;
3891 
3892 	if (scale_h < min_downscale || scale_h > max_upscale)
3893 		return -EINVAL;
3894 
3895 	/*
3896 	 * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3897 	 * assume reasonable defaults based on the format.
3898 	 */
3899 
3900 	return 0;
3901 }
3902 
3903 static void
3904 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3905 				 uint64_t tiling_flags)
3906 {
3907 	/* Fill GFX8 params */
3908 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3909 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3910 
3911 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3912 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3913 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3914 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3915 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3916 
3917 		/* XXX fix me for VI */
3918 		tiling_info->gfx8.num_banks = num_banks;
3919 		tiling_info->gfx8.array_mode =
3920 				DC_ARRAY_2D_TILED_THIN1;
3921 		tiling_info->gfx8.tile_split = tile_split;
3922 		tiling_info->gfx8.bank_width = bankw;
3923 		tiling_info->gfx8.bank_height = bankh;
3924 		tiling_info->gfx8.tile_aspect = mtaspect;
3925 		tiling_info->gfx8.tile_mode =
3926 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3927 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3928 			== DC_ARRAY_1D_TILED_THIN1) {
3929 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3930 	}
3931 
3932 	tiling_info->gfx8.pipe_config =
3933 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3934 }
3935 
3936 static void
3937 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3938 				  union dc_tiling_info *tiling_info)
3939 {
3940 	tiling_info->gfx9.num_pipes =
3941 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3942 	tiling_info->gfx9.num_banks =
3943 		adev->gfx.config.gb_addr_config_fields.num_banks;
3944 	tiling_info->gfx9.pipe_interleave =
3945 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3946 	tiling_info->gfx9.num_shader_engines =
3947 		adev->gfx.config.gb_addr_config_fields.num_se;
3948 	tiling_info->gfx9.max_compressed_frags =
3949 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3950 	tiling_info->gfx9.num_rb_per_se =
3951 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3952 	tiling_info->gfx9.shaderEnable = 1;
3953 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3954 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3955 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3956 	    adev->asic_type == CHIP_VANGOGH)
3957 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3958 }
3959 
3960 static int
3961 validate_dcc(struct amdgpu_device *adev,
3962 	     const enum surface_pixel_format format,
3963 	     const enum dc_rotation_angle rotation,
3964 	     const union dc_tiling_info *tiling_info,
3965 	     const struct dc_plane_dcc_param *dcc,
3966 	     const struct dc_plane_address *address,
3967 	     const struct plane_size *plane_size)
3968 {
3969 	struct dc *dc = adev->dm.dc;
3970 	struct dc_dcc_surface_param input;
3971 	struct dc_surface_dcc_cap output;
3972 
3973 	memset(&input, 0, sizeof(input));
3974 	memset(&output, 0, sizeof(output));
3975 
3976 	if (!dcc->enable)
3977 		return 0;
3978 
3979 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3980 	    !dc->cap_funcs.get_dcc_compression_cap)
3981 		return -EINVAL;
3982 
3983 	input.format = format;
3984 	input.surface_size.width = plane_size->surface_size.width;
3985 	input.surface_size.height = plane_size->surface_size.height;
3986 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3987 
3988 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3989 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3990 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3991 		input.scan = SCAN_DIRECTION_VERTICAL;
3992 
3993 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3994 		return -EINVAL;
3995 
3996 	if (!output.capable)
3997 		return -EINVAL;
3998 
3999 	if (dcc->independent_64b_blks == 0 &&
4000 	    output.grph.rgb.independent_64b_blks != 0)
4001 		return -EINVAL;
4002 
4003 	return 0;
4004 }
4005 
4006 static bool
4007 modifier_has_dcc(uint64_t modifier)
4008 {
4009 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4010 }
4011 
4012 static unsigned
4013 modifier_gfx9_swizzle_mode(uint64_t modifier)
4014 {
4015 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4016 		return 0;
4017 
4018 	return AMD_FMT_MOD_GET(TILE, modifier);
4019 }
4020 
4021 static const struct drm_format_info *
4022 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4023 {
4024 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4025 }
4026 
4027 static void
4028 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4029 				    union dc_tiling_info *tiling_info,
4030 				    uint64_t modifier)
4031 {
4032 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4033 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4034 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4035 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4036 
4037 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4038 
4039 	if (!IS_AMD_FMT_MOD(modifier))
4040 		return;
4041 
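	/* Override the device defaults with the geometry encoded in the modifier. */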
4042 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4043 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4044 
4045 	if (adev->family >= AMDGPU_FAMILY_NV) {
4046 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4047 	} else {
4048 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4049 
4050 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4051 	}
4052 }
4053 
4054 enum dm_micro_swizzle {
4055 	MICRO_SWIZZLE_Z = 0,
4056 	MICRO_SWIZZLE_S = 1,
4057 	MICRO_SWIZZLE_D = 2,
4058 	MICRO_SWIZZLE_R = 3
4059 };
4060 
4061 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4062 					  uint32_t format,
4063 					  uint64_t modifier)
4064 {
4065 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4066 	const struct drm_format_info *info = drm_format_info(format);
4067 
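	/*
	 * The low two bits of the swizzle mode select the micro-tile kind
	 * (see enum dm_micro_swizzle).
	 */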
4068 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4069 
4070 	if (!info)
4071 		return false;
4072 
4073 	/*
4074 	 * We always have to allow this modifier, because core DRM still
4075 	 * checks LINEAR support if userspace does not provide modifiers.
4076 	 */
4077 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4078 		return true;
4079 
4080 	/*
4081 	 * The arbitrary tiling support for multiplane formats has not been hooked
4082 	 * up.
4083 	 */
4084 	if (info->num_planes > 1)
4085 		return false;
4086 
4087 	/*
4088 	 * For D swizzle the canonical modifier depends on the bpp, so check
4089 	 * it here.
4090 	 */
4091 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4092 	    adev->family >= AMDGPU_FAMILY_NV) {
4093 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4094 			return false;
4095 	}
4096 
4097 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4098 	    info->cpp[0] < 8)
4099 		return false;
4100 
4101 	if (modifier_has_dcc(modifier)) {
4102 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4103 		if (info->cpp[0] != 4)
4104 			return false;
4105 	}
4106 
4107 	return true;
4108 }
4109 
4110 static void
4111 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4112 {
4113 	if (!*mods)
4114 		return;
4115 
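	/* Out of space: double the capacity so repeated appends stay amortized O(1). */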
4116 	if (*cap - *size < 1) {
4117 		uint64_t new_cap = *cap * 2;
4118 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4119 
4120 		if (!new_mods) {
4121 			kfree(*mods);
4122 			*mods = NULL;
4123 			return;
4124 		}
4125 
4126 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4127 		kfree(*mods);
4128 		*mods = new_mods;
4129 		*cap = new_cap;
4130 	}
4131 
4132 	(*mods)[*size] = mod;
4133 	*size += 1;
4134 }
4135 
4136 static void
4137 add_gfx9_modifiers(const struct amdgpu_device *adev,
4138 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4139 {
4140 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4141 	int pipe_xor_bits = min(8, pipes +
4142 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4143 	int bank_xor_bits = min(8 - pipe_xor_bits,
4144 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4145 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4146 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4147 
4148 
4149 	if (adev->family == AMDGPU_FAMILY_RV) {
4150 		/* Raven2 and later */
4151 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4152 
4153 		/*
4154 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4155 		 * doesn't support _D on DCN
4156 		 */
4157 
4158 		if (has_constant_encode) {
4159 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4160 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4161 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4162 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4163 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4164 				    AMD_FMT_MOD_SET(DCC, 1) |
4165 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4166 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4167 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4168 		}
4169 
4170 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4171 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4172 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4173 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4174 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4175 			    AMD_FMT_MOD_SET(DCC, 1) |
4176 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4177 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4178 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4179 
4180 		if (has_constant_encode) {
4181 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4182 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4183 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4184 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4185 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4186 				    AMD_FMT_MOD_SET(DCC, 1) |
4187 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4188 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4189 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4190 
4191 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4192 				    AMD_FMT_MOD_SET(RB, rb) |
4193 				    AMD_FMT_MOD_SET(PIPE, pipes));
4194 		}
4195 
4196 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4198 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4199 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4200 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4201 			    AMD_FMT_MOD_SET(DCC, 1) |
4202 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4203 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4204 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4205 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4206 			    AMD_FMT_MOD_SET(RB, rb) |
4207 			    AMD_FMT_MOD_SET(PIPE, pipes));
4208 	}
4209 
4210 	/*
4211 	 * Only supported for 64bpp on Raven, will be filtered on format in
4212 	 * dm_plane_format_mod_supported.
4213 	 */
4214 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4215 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4216 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4217 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4218 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4219 
4220 	if (adev->family == AMDGPU_FAMILY_RV) {
4221 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4222 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4223 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4224 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4225 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4226 	}
4227 
4228 	/*
4229 	 * Only supported for 64bpp on Raven, will be filtered on format in
4230 	 * dm_plane_format_mod_supported.
4231 	 */
4232 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4233 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4234 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4235 
4236 	if (adev->family == AMDGPU_FAMILY_RV) {
4237 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4238 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4239 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4240 	}
4241 }
4242 
4243 static void
4244 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4245 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4246 {
4247 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4248 
4249 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4250 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4251 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4252 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4253 		    AMD_FMT_MOD_SET(DCC, 1) |
4254 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4255 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4256 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4257 
4258 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4259 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4260 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4261 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4262 		    AMD_FMT_MOD_SET(DCC, 1) |
4263 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4264 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4265 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4266 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4267 
4268 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4269 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4270 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4271 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4272 
4273 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4274 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4275 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4276 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4277 
4278 
4279 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4280 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4281 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4282 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4283 
4284 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4285 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4286 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4287 }
4288 
4289 static void
4290 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4291 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4292 {
4293 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4294 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4295 
4296 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4297 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4298 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4299 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4300 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4301 		    AMD_FMT_MOD_SET(DCC, 1) |
4302 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4303 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4304 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4305 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4306 
4307 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4308 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4309 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4310 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4311 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4312 		    AMD_FMT_MOD_SET(DCC, 1) |
4313 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4314 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4315 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4316 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4317 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4318 
4319 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4321 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4322 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4323 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4324 
4325 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4326 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4327 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4328 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4329 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4330 
4331 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4332 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4333 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4334 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4335 
4336 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4337 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4338 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4339 }
4340 
4341 static int
4342 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4343 {
4344 	uint64_t size = 0, capacity = 128;
4345 	*mods = NULL;
4346 
4347 	/* We have not hooked up any pre-GFX9 modifiers. */
4348 	if (adev->family < AMDGPU_FAMILY_AI)
4349 		return 0;
4350 
4351 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4352 
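	/*
	 * Cursor planes only advertise the linear layout, followed by the
	 * INVALID modifier that terminates the list.
	 */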
4353 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4354 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4355 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4356 		return *mods ? 0 : -ENOMEM;
4357 	}
4358 
4359 	switch (adev->family) {
4360 	case AMDGPU_FAMILY_AI:
4361 	case AMDGPU_FAMILY_RV:
4362 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4363 		break;
4364 	case AMDGPU_FAMILY_NV:
4365 	case AMDGPU_FAMILY_VGH:
4366 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4367 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4368 		else
4369 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4370 		break;
4371 	}
4372 
4373 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4374 
4375 	/* INVALID marks the end of the list. */
4376 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4377 
4378 	if (!*mods)
4379 		return -ENOMEM;
4380 
4381 	return 0;
4382 }
4383 
4384 static int
4385 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4386 					  const struct amdgpu_framebuffer *afb,
4387 					  const enum surface_pixel_format format,
4388 					  const enum dc_rotation_angle rotation,
4389 					  const struct plane_size *plane_size,
4390 					  union dc_tiling_info *tiling_info,
4391 					  struct dc_plane_dcc_param *dcc,
4392 					  struct dc_plane_address *address,
4393 					  const bool force_disable_dcc)
4394 {
4395 	const uint64_t modifier = afb->base.modifier;
4396 	int ret;
4397 
4398 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4399 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4400 
4401 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
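		/* For DCC modifiers, framebuffer plane 1 carries the compression metadata. */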
4402 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4403 
4404 		dcc->enable = 1;
4405 		dcc->meta_pitch = afb->base.pitches[1];
4406 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4407 
4408 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4409 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4410 	}
4411 
4412 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4413 	if (ret)
4414 		return ret;
4415 
4416 	return 0;
4417 }
4418 
4419 static int
4420 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4421 			     const struct amdgpu_framebuffer *afb,
4422 			     const enum surface_pixel_format format,
4423 			     const enum dc_rotation_angle rotation,
4424 			     const uint64_t tiling_flags,
4425 			     union dc_tiling_info *tiling_info,
4426 			     struct plane_size *plane_size,
4427 			     struct dc_plane_dcc_param *dcc,
4428 			     struct dc_plane_address *address,
4429 			     bool tmz_surface,
4430 			     bool force_disable_dcc)
4431 {
4432 	const struct drm_framebuffer *fb = &afb->base;
4433 	int ret;
4434 
4435 	memset(tiling_info, 0, sizeof(*tiling_info));
4436 	memset(plane_size, 0, sizeof(*plane_size));
4437 	memset(dcc, 0, sizeof(*dcc));
4438 	memset(address, 0, sizeof(*address));
4439 
4440 	address->tmz_surface = tmz_surface;
4441 
4442 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4443 		uint64_t addr = afb->address + fb->offsets[0];
4444 
4445 		plane_size->surface_size.x = 0;
4446 		plane_size->surface_size.y = 0;
4447 		plane_size->surface_size.width = fb->width;
4448 		plane_size->surface_size.height = fb->height;
4449 		plane_size->surface_pitch =
4450 			fb->pitches[0] / fb->format->cpp[0];
4451 
4452 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4453 		address->grph.addr.low_part = lower_32_bits(addr);
4454 		address->grph.addr.high_part = upper_32_bits(addr);
4455 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
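		/*
		 * Semi-planar video formats: fb->offsets[0] and fb->offsets[1]
		 * locate the luma and chroma planes within the buffer object.
		 */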
4456 		uint64_t luma_addr = afb->address + fb->offsets[0];
4457 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4458 
4459 		plane_size->surface_size.x = 0;
4460 		plane_size->surface_size.y = 0;
4461 		plane_size->surface_size.width = fb->width;
4462 		plane_size->surface_size.height = fb->height;
4463 		plane_size->surface_pitch =
4464 			fb->pitches[0] / fb->format->cpp[0];
4465 
4466 		plane_size->chroma_size.x = 0;
4467 		plane_size->chroma_size.y = 0;
4468 		/* TODO: set these based on surface format */
4469 		plane_size->chroma_size.width = fb->width / 2;
4470 		plane_size->chroma_size.height = fb->height / 2;
4471 
4472 		plane_size->chroma_pitch =
4473 			fb->pitches[1] / fb->format->cpp[1];
4474 
4475 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4476 		address->video_progressive.luma_addr.low_part =
4477 			lower_32_bits(luma_addr);
4478 		address->video_progressive.luma_addr.high_part =
4479 			upper_32_bits(luma_addr);
4480 		address->video_progressive.chroma_addr.low_part =
4481 			lower_32_bits(chroma_addr);
4482 		address->video_progressive.chroma_addr.high_part =
4483 			upper_32_bits(chroma_addr);
4484 	}
4485 
4486 	if (adev->family >= AMDGPU_FAMILY_AI) {
4487 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4488 								rotation, plane_size,
4489 								tiling_info, dcc,
4490 								address,
4491 								force_disable_dcc);
4492 		if (ret)
4493 			return ret;
4494 	} else {
4495 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4496 	}
4497 
4498 	return 0;
4499 }
4500 
4501 static void
4502 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4503 			       bool *per_pixel_alpha, bool *global_alpha,
4504 			       int *global_alpha_value)
4505 {
4506 	*per_pixel_alpha = false;
4507 	*global_alpha = false;
4508 	*global_alpha_value = 0xff;
4509 
4510 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4511 		return;
4512 
4513 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4514 		static const uint32_t alpha_formats[] = {
4515 			DRM_FORMAT_ARGB8888,
4516 			DRM_FORMAT_RGBA8888,
4517 			DRM_FORMAT_ABGR8888,
4518 		};
4519 		uint32_t format = plane_state->fb->format->format;
4520 		unsigned int i;
4521 
4522 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4523 			if (format == alpha_formats[i]) {
4524 				*per_pixel_alpha = true;
4525 				break;
4526 			}
4527 		}
4528 	}
4529 
4530 	if (plane_state->alpha < 0xffff) {
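	/*
	 * DRM plane alpha is 16 bit (0xffff == fully opaque); DC takes an
	 * 8 bit value, so drop the low byte.
	 */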
4531 		*global_alpha = true;
4532 		*global_alpha_value = plane_state->alpha >> 8;
4533 	}
4534 }
4535 
4536 static int
4537 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4538 			    const enum surface_pixel_format format,
4539 			    enum dc_color_space *color_space)
4540 {
4541 	bool full_range;
4542 
4543 	*color_space = COLOR_SPACE_SRGB;
4544 
4545 	/* DRM color properties only affect non-RGB formats. */
4546 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4547 		return 0;
4548 
4549 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4550 
4551 	switch (plane_state->color_encoding) {
4552 	case DRM_COLOR_YCBCR_BT601:
4553 		if (full_range)
4554 			*color_space = COLOR_SPACE_YCBCR601;
4555 		else
4556 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4557 		break;
4558 
4559 	case DRM_COLOR_YCBCR_BT709:
4560 		if (full_range)
4561 			*color_space = COLOR_SPACE_YCBCR709;
4562 		else
4563 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4564 		break;
4565 
4566 	case DRM_COLOR_YCBCR_BT2020:
4567 		if (full_range)
4568 			*color_space = COLOR_SPACE_2020_YCBCR;
4569 		else
4570 			return -EINVAL;
4571 		break;
4572 
4573 	default:
4574 		return -EINVAL;
4575 	}
4576 
4577 	return 0;
4578 }
4579 
4580 static int
4581 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4582 			    const struct drm_plane_state *plane_state,
4583 			    const uint64_t tiling_flags,
4584 			    struct dc_plane_info *plane_info,
4585 			    struct dc_plane_address *address,
4586 			    bool tmz_surface,
4587 			    bool force_disable_dcc)
4588 {
4589 	const struct drm_framebuffer *fb = plane_state->fb;
4590 	const struct amdgpu_framebuffer *afb =
4591 		to_amdgpu_framebuffer(plane_state->fb);
4592 	struct drm_format_name_buf format_name;
4593 	int ret;
4594 
4595 	memset(plane_info, 0, sizeof(*plane_info));
4596 
4597 	switch (fb->format->format) {
4598 	case DRM_FORMAT_C8:
4599 		plane_info->format =
4600 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4601 		break;
4602 	case DRM_FORMAT_RGB565:
4603 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4604 		break;
4605 	case DRM_FORMAT_XRGB8888:
4606 	case DRM_FORMAT_ARGB8888:
4607 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4608 		break;
4609 	case DRM_FORMAT_XRGB2101010:
4610 	case DRM_FORMAT_ARGB2101010:
4611 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4612 		break;
4613 	case DRM_FORMAT_XBGR2101010:
4614 	case DRM_FORMAT_ABGR2101010:
4615 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4616 		break;
4617 	case DRM_FORMAT_XBGR8888:
4618 	case DRM_FORMAT_ABGR8888:
4619 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4620 		break;
4621 	case DRM_FORMAT_NV21:
4622 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4623 		break;
4624 	case DRM_FORMAT_NV12:
4625 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4626 		break;
4627 	case DRM_FORMAT_P010:
4628 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4629 		break;
4630 	case DRM_FORMAT_XRGB16161616F:
4631 	case DRM_FORMAT_ARGB16161616F:
4632 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4633 		break;
4634 	case DRM_FORMAT_XBGR16161616F:
4635 	case DRM_FORMAT_ABGR16161616F:
4636 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4637 		break;
4638 	default:
4639 		DRM_ERROR(
4640 			"Unsupported screen format %s\n",
4641 			drm_get_format_name(fb->format->format, &format_name));
4642 		return -EINVAL;
4643 	}
4644 
4645 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4646 	case DRM_MODE_ROTATE_0:
4647 		plane_info->rotation = ROTATION_ANGLE_0;
4648 		break;
4649 	case DRM_MODE_ROTATE_90:
4650 		plane_info->rotation = ROTATION_ANGLE_90;
4651 		break;
4652 	case DRM_MODE_ROTATE_180:
4653 		plane_info->rotation = ROTATION_ANGLE_180;
4654 		break;
4655 	case DRM_MODE_ROTATE_270:
4656 		plane_info->rotation = ROTATION_ANGLE_270;
4657 		break;
4658 	default:
4659 		plane_info->rotation = ROTATION_ANGLE_0;
4660 		break;
4661 	}
4662 
4663 	plane_info->visible = true;
4664 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4665 
4666 	plane_info->layer_index = 0;
4667 
4668 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4669 					  &plane_info->color_space);
4670 	if (ret)
4671 		return ret;
4672 
4673 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4674 					   plane_info->rotation, tiling_flags,
4675 					   &plane_info->tiling_info,
4676 					   &plane_info->plane_size,
4677 					   &plane_info->dcc, address, tmz_surface,
4678 					   force_disable_dcc);
4679 	if (ret)
4680 		return ret;
4681 
4682 	fill_blending_from_plane_state(
4683 		plane_state, &plane_info->per_pixel_alpha,
4684 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4685 
4686 	return 0;
4687 }
4688 
4689 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4690 				    struct dc_plane_state *dc_plane_state,
4691 				    struct drm_plane_state *plane_state,
4692 				    struct drm_crtc_state *crtc_state)
4693 {
4694 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4695 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4696 	struct dc_scaling_info scaling_info;
4697 	struct dc_plane_info plane_info;
4698 	int ret;
4699 	bool force_disable_dcc = false;
4700 
4701 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4702 	if (ret)
4703 		return ret;
4704 
4705 	dc_plane_state->src_rect = scaling_info.src_rect;
4706 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4707 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4708 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4709 
4710 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4711 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4712 					  afb->tiling_flags,
4713 					  &plane_info,
4714 					  &dc_plane_state->address,
4715 					  afb->tmz_surface,
4716 					  force_disable_dcc);
4717 	if (ret)
4718 		return ret;
4719 
4720 	dc_plane_state->format = plane_info.format;
4721 	dc_plane_state->color_space = plane_info.color_space;
4723 	dc_plane_state->plane_size = plane_info.plane_size;
4724 	dc_plane_state->rotation = plane_info.rotation;
4725 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4726 	dc_plane_state->stereo_format = plane_info.stereo_format;
4727 	dc_plane_state->tiling_info = plane_info.tiling_info;
4728 	dc_plane_state->visible = plane_info.visible;
4729 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4730 	dc_plane_state->global_alpha = plane_info.global_alpha;
4731 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4732 	dc_plane_state->dcc = plane_info.dcc;
4733 	dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
4734 
4735 	/*
4736 	 * Always set input transfer function, since plane state is refreshed
4737 	 * every time.
4738 	 */
4739 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4740 	if (ret)
4741 		return ret;
4742 
4743 	return 0;
4744 }
4745 
4746 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4747 					   const struct dm_connector_state *dm_state,
4748 					   struct dc_stream_state *stream)
4749 {
4750 	enum amdgpu_rmx_type rmx_type;
4751 
4752 	struct rect src = { 0 }; /* viewport in composition space */
4753 	struct rect dst = { 0 }; /* stream addressable area */
4754 
4755 	/* no mode; nothing to be done */
4756 	if (!mode)
4757 		return;
4758 
4759 	/* Full screen scaling by default */
4760 	src.width = mode->hdisplay;
4761 	src.height = mode->vdisplay;
4762 	dst.width = stream->timing.h_addressable;
4763 	dst.height = stream->timing.v_addressable;
4764 
4765 	if (dm_state) {
4766 		rmx_type = dm_state->scaling;
4767 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4768 			if (src.width * dst.height <
4769 					src.height * dst.width) {
4770 				/* height needs less upscaling/more downscaling */
4771 				dst.width = src.width *
4772 						dst.height / src.height;
4773 			} else {
4774 				/* width needs less upscaling/more downscaling */
4775 				dst.height = src.height *
4776 						dst.width / src.width;
4777 			}
4778 		} else if (rmx_type == RMX_CENTER) {
4779 			dst = src;
4780 		}
4781 
4782 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4783 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4784 
4785 		if (dm_state->underscan_enable) {
4786 			dst.x += dm_state->underscan_hborder / 2;
4787 			dst.y += dm_state->underscan_vborder / 2;
4788 			dst.width -= dm_state->underscan_hborder;
4789 			dst.height -= dm_state->underscan_vborder;
4790 		}
4791 	}
4792 
4793 	stream->src = src;
4794 	stream->dst = dst;
4795 
4796 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4797 			dst.x, dst.y, dst.width, dst.height);
4798 
4799 }
4800 
4801 static enum dc_color_depth
4802 convert_color_depth_from_display_info(const struct drm_connector *connector,
4803 				      bool is_y420, int requested_bpc)
4804 {
4805 	uint8_t bpc;
4806 
4807 	if (is_y420) {
4808 		bpc = 8;
4809 
4810 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4811 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4812 			bpc = 16;
4813 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4814 			bpc = 12;
4815 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4816 			bpc = 10;
4817 	} else {
4818 		bpc = (uint8_t)connector->display_info.bpc;
4819 		/* Assume 8 bpc by default if no bpc is specified. */
4820 		bpc = bpc ? bpc : 8;
4821 	}
4822 
4823 	if (requested_bpc > 0) {
4824 		/*
4825 		 * Cap display bpc based on the user requested value.
4826 		 *
4827 		 * The value for state->max_bpc may not be correctly updated
4828 		 * depending on when the connector gets added to the state
4829 		 * or if this was called outside of atomic check, so it
4830 		 * can't be used directly.
4831 		 */
4832 		bpc = min_t(u8, bpc, requested_bpc);
4833 
4834 		/* Round down to the nearest even number. */
4835 		bpc = bpc - (bpc & 1);
4836 	}
4837 
4838 	switch (bpc) {
4839 	case 0:
4840 		/*
4841 		 * Temporary workaround: DRM doesn't parse color depth for
4842 		 * EDID revisions before 1.4.
4843 		 * TODO: Fix edid parsing
4844 		 */
4845 		return COLOR_DEPTH_888;
4846 	case 6:
4847 		return COLOR_DEPTH_666;
4848 	case 8:
4849 		return COLOR_DEPTH_888;
4850 	case 10:
4851 		return COLOR_DEPTH_101010;
4852 	case 12:
4853 		return COLOR_DEPTH_121212;
4854 	case 14:
4855 		return COLOR_DEPTH_141414;
4856 	case 16:
4857 		return COLOR_DEPTH_161616;
4858 	default:
4859 		return COLOR_DEPTH_UNDEFINED;
4860 	}
4861 }
4862 
4863 static enum dc_aspect_ratio
4864 get_aspect_ratio(const struct drm_display_mode *mode_in)
4865 {
4866 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4867 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4868 }
4869 
4870 static enum dc_color_space
4871 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4872 {
4873 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4874 
4875 	switch (dc_crtc_timing->pixel_encoding)	{
4876 	case PIXEL_ENCODING_YCBCR422:
4877 	case PIXEL_ENCODING_YCBCR444:
4878 	case PIXEL_ENCODING_YCBCR420:
4879 	{
4880 		/*
4881 		 * 27.03 MHz is the separation point between HDTV and SDTV
4882 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4883 		 * respectively.
4884 		 */
4885 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4886 			if (dc_crtc_timing->flags.Y_ONLY)
4887 				color_space =
4888 					COLOR_SPACE_YCBCR709_LIMITED;
4889 			else
4890 				color_space = COLOR_SPACE_YCBCR709;
4891 		} else {
4892 			if (dc_crtc_timing->flags.Y_ONLY)
4893 				color_space =
4894 					COLOR_SPACE_YCBCR601_LIMITED;
4895 			else
4896 				color_space = COLOR_SPACE_YCBCR601;
4897 		}
4898 
4899 	}
4900 	break;
4901 	case PIXEL_ENCODING_RGB:
4902 		color_space = COLOR_SPACE_SRGB;
4903 		break;
4904 
4905 	default:
4906 		WARN_ON(1);
4907 		break;
4908 	}
4909 
4910 	return color_space;
4911 }
4912 
4913 static bool adjust_colour_depth_from_display_info(
4914 	struct dc_crtc_timing *timing_out,
4915 	const struct drm_display_info *info)
4916 {
4917 	enum dc_color_depth depth = timing_out->display_color_depth;
4918 	int normalized_clk;
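	/*
	 * Step the colour depth down until the TMDS clock it requires fits
	 * within the sink's max_tmds_clock limit.
	 */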
4919 	do {
4920 		normalized_clk = timing_out->pix_clk_100hz / 10;
4921 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4922 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4923 			normalized_clk /= 2;
4924 		/* Adjust the pixel clock per the HDMI spec for the given colour depth */
4925 		switch (depth) {
4926 		case COLOR_DEPTH_888:
4927 			break;
4928 		case COLOR_DEPTH_101010:
4929 			normalized_clk = (normalized_clk * 30) / 24;
4930 			break;
4931 		case COLOR_DEPTH_121212:
4932 			normalized_clk = (normalized_clk * 36) / 24;
4933 			break;
4934 		case COLOR_DEPTH_161616:
4935 			normalized_clk = (normalized_clk * 48) / 24;
4936 			break;
4937 		default:
4938 			/* The above depths are the only ones valid for HDMI. */
4939 			return false;
4940 		}
4941 		if (normalized_clk <= info->max_tmds_clock) {
4942 			timing_out->display_color_depth = depth;
4943 			return true;
4944 		}
4945 	} while (--depth > COLOR_DEPTH_666);
4946 	return false;
4947 }
4948 
4949 static void fill_stream_properties_from_drm_display_mode(
4950 	struct dc_stream_state *stream,
4951 	const struct drm_display_mode *mode_in,
4952 	const struct drm_connector *connector,
4953 	const struct drm_connector_state *connector_state,
4954 	const struct dc_stream_state *old_stream,
4955 	int requested_bpc)
4956 {
4957 	struct dc_crtc_timing *timing_out = &stream->timing;
4958 	const struct drm_display_info *info = &connector->display_info;
4959 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4960 	struct hdmi_vendor_infoframe hv_frame;
4961 	struct hdmi_avi_infoframe avi_frame;
4962 
4963 	memset(&hv_frame, 0, sizeof(hv_frame));
4964 	memset(&avi_frame, 0, sizeof(avi_frame));
4965 
4966 	timing_out->h_border_left = 0;
4967 	timing_out->h_border_right = 0;
4968 	timing_out->v_border_top = 0;
4969 	timing_out->v_border_bottom = 0;
4970 	/* TODO: un-hardcode */
4971 	if (drm_mode_is_420_only(info, mode_in)
4972 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4973 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4974 	else if (drm_mode_is_420_also(info, mode_in)
4975 			&& aconnector->force_yuv420_output)
4976 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4977 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4978 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4979 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4980 	else
4981 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4982 
4983 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4984 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4985 		connector,
4986 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4987 		requested_bpc);
4988 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4989 	timing_out->hdmi_vic = 0;
4990 
4991 	if (old_stream) {
4992 		timing_out->vic = old_stream->timing.vic;
4993 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4994 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4995 	} else {
4996 		timing_out->vic = drm_match_cea_mode(mode_in);
4997 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4998 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4999 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5000 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5001 	}
5002 
5003 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5004 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5005 		timing_out->vic = avi_frame.video_code;
5006 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5007 		timing_out->hdmi_vic = hv_frame.vic;
5008 	}
5009 
5010 	timing_out->h_addressable = mode_in->hdisplay;
5011 	timing_out->h_total = mode_in->htotal;
5012 	timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5013 	timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5014 	timing_out->v_total = mode_in->vtotal;
5015 	timing_out->v_addressable = mode_in->vdisplay;
5016 	timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5017 	timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
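	/* mode_in->clock is in kHz; multiplying by 10 expresses it in 100 Hz units. */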
5018 	timing_out->pix_clk_100hz = mode_in->clock * 10;
5019 
5020 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5021 
5022 	stream->output_color_space = get_output_color_space(timing_out);
5023 
5024 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5025 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5026 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5027 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5028 		    drm_mode_is_420_also(info, mode_in) &&
5029 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5030 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5031 			adjust_colour_depth_from_display_info(timing_out, info);
5032 		}
5033 	}
5034 }
5035 
5036 static void fill_audio_info(struct audio_info *audio_info,
5037 			    const struct drm_connector *drm_connector,
5038 			    const struct dc_sink *dc_sink)
5039 {
5040 	int i = 0;
5041 	int cea_revision = 0;
5042 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5043 
5044 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5045 	audio_info->product_id = edid_caps->product_id;
5046 
5047 	cea_revision = drm_connector->display_info.cea_rev;
5048 
5049 	strscpy(audio_info->display_name,
5050 		edid_caps->display_name,
5051 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5052 
5053 	if (cea_revision >= 3) {
5054 		audio_info->mode_count = edid_caps->audio_mode_count;
5055 
5056 		for (i = 0; i < audio_info->mode_count; ++i) {
5057 			audio_info->modes[i].format_code =
5058 					(enum audio_format_code)
5059 					(edid_caps->audio_modes[i].format_code);
5060 			audio_info->modes[i].channel_count =
5061 					edid_caps->audio_modes[i].channel_count;
5062 			audio_info->modes[i].sample_rates.all =
5063 					edid_caps->audio_modes[i].sample_rate;
5064 			audio_info->modes[i].sample_size =
5065 					edid_caps->audio_modes[i].sample_size;
5066 		}
5067 	}
5068 
5069 	audio_info->flags.all = edid_caps->speaker_flags;
5070 
5071 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5072 	if (drm_connector->latency_present[0]) {
5073 		audio_info->video_latency = drm_connector->video_latency[0];
5074 		audio_info->audio_latency = drm_connector->audio_latency[0];
5075 	}
5076 
5077 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5078 
5079 }
5080 
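/*
 * Copy only the crtc_* hardware timing fields; the logical
 * hdisplay/vdisplay fields of dst_mode are deliberately left alone so
 * userspace still sees the mode it requested.
 */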
5081 static void
5082 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5083 				      struct drm_display_mode *dst_mode)
5084 {
5085 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5086 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5087 	dst_mode->crtc_clock = src_mode->crtc_clock;
5088 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5089 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5090 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5091 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5092 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5093 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5094 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5095 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5096 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5097 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5098 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5099 }
5100 
5101 static void
5102 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5103 					const struct drm_display_mode *native_mode,
5104 					bool scale_enabled)
5105 {
5106 	if (scale_enabled) {
5107 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5108 	} else if (native_mode->clock == drm_mode->clock &&
5109 			native_mode->htotal == drm_mode->htotal &&
5110 			native_mode->vtotal == drm_mode->vtotal) {
5111 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5112 	} else {
5113 		/* neither scaling nor an amdgpu-inserted mode, no need to patch */
5114 	}
5115 }
5116 
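/*
 * Create a virtual sink on the connector's link so a stream can still be
 * constructed when no physical sink is attached.
 */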
5117 static struct dc_sink *
5118 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5119 {
5120 	struct dc_sink_init_data sink_init_data = { 0 };
5121 	struct dc_sink *sink = NULL;
5122 	sink_init_data.link = aconnector->dc_link;
5123 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5124 
5125 	sink = dc_sink_create(&sink_init_data);
5126 	if (!sink) {
5127 		DRM_ERROR("Failed to create sink!\n");
5128 		return NULL;
5129 	}
5130 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5131 
5132 	return sink;
5133 }
5134 
5135 static void set_multisync_trigger_params(
5136 		struct dc_stream_state *stream)
5137 {
5138 	if (stream->triggered_crtc_reset.enabled) {
5139 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5140 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5141 	}
5142 }
5143 
5144 static void set_master_stream(struct dc_stream_state *stream_set[],
5145 			      int stream_count)
5146 {
5147 	int j, highest_rfr = 0, master_stream = 0;
5148 
5149 	for (j = 0; j < stream_count; j++) {
5150 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5151 			int refresh_rate = 0;
5152 
5153 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5154 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
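			/*
			 * pix_clk_100hz is in units of 100 Hz, so the *100
			 * yields Hz. E.g. a 148.5 MHz clock (pix_clk_100hz ==
			 * 1485000) with a 2200x1125 total gives
			 * 148500000 / 2475000 = 60 Hz.
			 */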
5155 			if (refresh_rate > highest_rfr) {
5156 				highest_rfr = refresh_rate;
5157 				master_stream = j;
5158 			}
5159 		}
5160 	}
5161 	for (j = 0; j < stream_count; j++) {
5162 		if (stream_set[j])
5163 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5164 	}
5165 }
5166 
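/*
 * For multi-display synchronization: arm the per-stream CRTC reset trigger
 * and elect the stream with the highest refresh rate as the master that the
 * other CRTCs reset against.
 */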
5167 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5168 {
5169 	int i = 0;
5170 
5171 	if (context->stream_count < 2)
5172 		return;
5173 	for (i = 0; i < context->stream_count ; i++) {
5174 		if (!context->streams[i])
5175 			continue;
5176 		/*
5177 		 * TODO: add a function to read AMD VSDB bits and set
5178 		 * crtc_sync_master.multi_sync_enabled flag
5179 		 * For now it's set to false
5180 		 */
5181 		set_multisync_trigger_params(context->streams[i]);
5182 	}
5183 	set_master_stream(context->streams, context->stream_count);
5184 }
5185 
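/*
 * Return the mode used as the freesync video base timing: the mode with the
 * highest refresh rate at the preferred mode's resolution. The result is
 * cached in freesync_vid_base so later lookups stay stable.
 */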
5186 static struct drm_display_mode *
5187 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5188 			  bool use_probed_modes)
5189 {
5190 	struct drm_display_mode *m, *m_pref = NULL;
5191 	u16 current_refresh, highest_refresh;
5192 	struct list_head *list_head = use_probed_modes ?
5193 						    &aconnector->base.probed_modes :
5194 						    &aconnector->base.modes;
5195 
5196 	if (aconnector->freesync_vid_base.clock != 0)
5197 		return &aconnector->freesync_vid_base;
5198 
5199 	/* Find the preferred mode */
5200 	list_for_each_entry(m, list_head, head) {
5201 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5202 			m_pref = m;
5203 			break;
5204 		}
5205 	}
5206 
5207 	if (!m_pref) {
5208 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5209 		m_pref = list_first_entry_or_null(
5210 			&aconnector->base.modes, struct drm_display_mode, head);
5211 		if (!m_pref) {
5212 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5213 			return NULL;
5214 		}
5215 	}
5216 
5217 	highest_refresh = drm_mode_vrefresh(m_pref);
5218 
5219 	/*
5220 	 * Find the mode with the highest refresh rate at the same resolution.
5221 	 * For some monitors, the preferred mode is not the mode with the
5222 	 * highest supported refresh rate.
5223 	 */
5224 	list_for_each_entry(m, list_head, head) {
5225 		current_refresh  = drm_mode_vrefresh(m);
5226 
5227 		if (m->hdisplay == m_pref->hdisplay &&
5228 		    m->vdisplay == m_pref->vdisplay &&
5229 		    highest_refresh < current_refresh) {
5230 			highest_refresh = current_refresh;
5231 			m_pref = m;
5232 		}
5233 	}
5234 
5235 	aconnector->freesync_vid_base = *m_pref;
5236 	return m_pref;
5237 }
5238 
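/*
 * A freesync video mode must match the base mode in everything except
 * vtotal; vsync_start/vsync_end may only shift by that same vtotal delta,
 * i.e. only the vertical front porch is stretched.
 */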
5239 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5240 				   struct amdgpu_dm_connector *aconnector)
5241 {
5242 	struct drm_display_mode *high_mode;
5243 	int timing_diff;
5244 
5245 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5246 	if (!high_mode || !mode)
5247 		return false;
5248 
5249 	timing_diff = high_mode->vtotal - mode->vtotal;
5250 
5251 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5252 	    high_mode->hdisplay != mode->hdisplay ||
5253 	    high_mode->vdisplay != mode->vdisplay ||
5254 	    high_mode->hsync_start != mode->hsync_start ||
5255 	    high_mode->hsync_end != mode->hsync_end ||
5256 	    high_mode->htotal != mode->htotal ||
5257 	    high_mode->hskew != mode->hskew ||
5258 	    high_mode->vscan != mode->vscan ||
5259 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5260 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5261 		return false;
5262 	else
5263 		return true;
5264 }
5265 
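/*
 * Build a dc_stream_state for the given mode and connector. dm_state and
 * old_stream may be NULL, and requested_bpc caps the color depth so the
 * caller can retry validation at lower depths.
 */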
5266 static struct dc_stream_state *
5267 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5268 		       const struct drm_display_mode *drm_mode,
5269 		       const struct dm_connector_state *dm_state,
5270 		       const struct dc_stream_state *old_stream,
5271 		       int requested_bpc)
5272 {
5273 	struct drm_display_mode *preferred_mode = NULL;
5274 	struct drm_connector *drm_connector;
5275 	const struct drm_connector_state *con_state =
5276 		dm_state ? &dm_state->base : NULL;
5277 	struct dc_stream_state *stream = NULL;
5278 	struct drm_display_mode mode = *drm_mode;
5279 	struct drm_display_mode saved_mode;
5280 	struct drm_display_mode *freesync_mode = NULL;
5281 	bool native_mode_found = false;
5282 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5283 	int mode_refresh;
5284 	int preferred_refresh = 0;
5285 #if defined(CONFIG_DRM_AMD_DC_DCN)
5286 	struct dsc_dec_dpcd_caps dsc_caps;
5287 	uint32_t link_bandwidth_kbps;
5288 #endif
5289 	struct dc_sink *sink = NULL;
5290 
5291 	memset(&saved_mode, 0, sizeof(saved_mode));
5292 
5293 	if (aconnector == NULL) {
5294 		DRM_ERROR("aconnector is NULL!\n");
5295 		return stream;
5296 	}
5297 
5298 	drm_connector = &aconnector->base;
5299 
5300 	if (!aconnector->dc_sink) {
5301 		sink = create_fake_sink(aconnector);
5302 		if (!sink)
5303 			return stream;
5304 	} else {
5305 		sink = aconnector->dc_sink;
5306 		dc_sink_retain(sink);
5307 	}
5308 
5309 	stream = dc_create_stream_for_sink(sink);
5310 
5311 	if (stream == NULL) {
5312 		DRM_ERROR("Failed to create stream for sink!\n");
5313 		goto finish;
5314 	}
5315 
5316 	stream->dm_stream_context = aconnector;
5317 
5318 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5319 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5320 
5321 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5322 		/* Search for preferred mode */
5323 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5324 			native_mode_found = true;
5325 			break;
5326 		}
5327 	}
5328 	if (!native_mode_found)
5329 		preferred_mode = list_first_entry_or_null(
5330 				&aconnector->base.modes,
5331 				struct drm_display_mode,
5332 				head);
5333 
5334 	mode_refresh = drm_mode_vrefresh(&mode);
5335 
5336 	if (preferred_mode == NULL) {
5337 		/*
5338 		 * This may not be an error, the use case is when we have no
5339 		 * usermode calls to reset and set mode upon hotplug. In this
5340 		 * case, we call set mode ourselves to restore the previous mode
5341 		 * and the mode list may not be filled in yet.
5342 		 */
5343 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5344 	} else {
5345 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5346 				 is_freesync_video_mode(&mode, aconnector);
5347 		if (recalculate_timing) {
5348 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5349 			saved_mode = mode;
5350 			mode = *freesync_mode;
5351 		} else {
5352 			decide_crtc_timing_for_drm_display_mode(
5353 				&mode, preferred_mode,
5354 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5355 		}
5356 
5357 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5358 	}
5359 
5360 	if (recalculate_timing)
5361 		drm_mode_set_crtcinfo(&saved_mode, 0);
5362 	else
5363 		drm_mode_set_crtcinfo(&mode, 0);
5364 
5365 	/*
5366 	 * If scaling is enabled and the refresh rate didn't change,
5367 	 * we copy the VIC and polarities from the old timings.
5368 	 */
5369 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5370 		fill_stream_properties_from_drm_display_mode(
5371 			stream, &mode, &aconnector->base, con_state, NULL,
5372 			requested_bpc);
5373 	else
5374 		fill_stream_properties_from_drm_display_mode(
5375 			stream, &mode, &aconnector->base, con_state, old_stream,
5376 			requested_bpc);
5377 
5378 	stream->timing.flags.DSC = 0;
5379 
5380 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5381 #if defined(CONFIG_DRM_AMD_DC_DCN)
5382 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5383 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5384 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5385 				      &dsc_caps);
5386 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5387 							     dc_link_get_link_cap(aconnector->dc_link));
5388 
5389 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5390 			/* Set DSC policy according to dsc_clock_en */
5391 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5392 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5393 
5394 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5395 						  &dsc_caps,
5396 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5397 						  0,
5398 						  link_bandwidth_kbps,
5399 						  &stream->timing,
5400 						  &stream->timing.dsc_cfg))
5401 				stream->timing.flags.DSC = 1;
5402 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5403 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5404 				stream->timing.flags.DSC = 1;
5405 
5406 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5407 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5408 
5409 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5410 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5411 
5412 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5413 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5414 		}
5415 #endif
5416 	}
5417 
5418 	update_stream_scaling_settings(&mode, dm_state, stream);
5419 
5420 	fill_audio_info(
5421 		&stream->audio_info,
5422 		drm_connector,
5423 		sink);
5424 
5425 	update_stream_signal(stream, sink);
5426 
5427 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5428 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5429 
5430 	if (stream->link->psr_settings.psr_feature_enabled) {
5431 		/*
5432 		 * Decide whether the stream supports VSC SDP colorimetry
5433 		 * before building the VSC info packet.
5434 		 */
5435 		stream->use_vsc_sdp_for_colorimetry = false;
5436 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5437 			stream->use_vsc_sdp_for_colorimetry =
5438 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5439 		} else {
5440 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5441 				stream->use_vsc_sdp_for_colorimetry = true;
5442 		}
5443 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5444 	}
5445 finish:
5446 	dc_sink_release(sink);
5447 
5448 	return stream;
5449 }
5450 
5451 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5452 {
5453 	drm_crtc_cleanup(crtc);
5454 	kfree(crtc);
5455 }
5456 
5457 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5458 				  struct drm_crtc_state *state)
5459 {
5460 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5461 
5462 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5463 	if (cur->stream)
5464 		dc_stream_release(cur->stream);
5465 
5467 	__drm_atomic_helper_crtc_destroy_state(state);
5468 
5470 	kfree(state);
5471 }
5472 
5473 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5474 {
5475 	struct dm_crtc_state *state;
5476 
5477 	if (crtc->state)
5478 		dm_crtc_destroy_state(crtc, crtc->state);
5479 
5480 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5481 	if (WARN_ON(!state))
5482 		return;
5483 
5484 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5485 }
5486 
5487 static struct drm_crtc_state *
5488 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5489 {
5490 	struct dm_crtc_state *state, *cur;
5491 
5492 	if (WARN_ON(!crtc->state))
5493 		return NULL;
5494 
5495 	cur = to_dm_crtc_state(crtc->state);
5496 
5497 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5498 	if (!state)
5499 		return NULL;
5500 
5501 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5502 
5503 	if (cur->stream) {
5504 		state->stream = cur->stream;
5505 		dc_stream_retain(state->stream);
5506 	}
5507 
5508 	state->active_planes = cur->active_planes;
5509 	state->vrr_infopacket = cur->vrr_infopacket;
5510 	state->abm_level = cur->abm_level;
5511 	state->vrr_supported = cur->vrr_supported;
5512 	state->freesync_config = cur->freesync_config;
5513 	state->crc_src = cur->crc_src;
5514 	state->cm_has_degamma = cur->cm_has_degamma;
5515 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5516 
5517 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5518 
5519 	return &state->base;
5520 }
5521 
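/*
 * The VUPDATE interrupt is only needed while VRR is active, so
 * dm_set_vblank() below enables it together with vblank only in that case
 * and always disables it when vblank is turned off.
 */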
5522 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5523 {
5524 	enum dc_irq_source irq_source;
5525 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5526 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5527 	int rc;
5528 
5529 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5530 
5531 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5532 
5533 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5534 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5535 	return rc;
5536 }
5537 
5538 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5539 {
5540 	enum dc_irq_source irq_source;
5541 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5542 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5543 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5544 #if defined(CONFIG_DRM_AMD_DC_DCN)
5545 	struct amdgpu_display_manager *dm = &adev->dm;
5546 	unsigned long flags;
5547 #endif
5548 	int rc = 0;
5549 
5550 	if (enable) {
5551 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5552 		if (amdgpu_dm_vrr_active(acrtc_state))
5553 			rc = dm_set_vupdate_irq(crtc, true);
5554 	} else {
5555 		/* vblank irq off -> vupdate irq off */
5556 		rc = dm_set_vupdate_irq(crtc, false);
5557 	}
5558 
5559 	if (rc)
5560 		return rc;
5561 
5562 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5563 
5564 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5565 		return -EBUSY;
5566 
5567 	if (amdgpu_in_reset(adev))
5568 		return 0;
5569 
5570 #if defined(CONFIG_DRM_AMD_DC_DCN)
5571 	spin_lock_irqsave(&dm->vblank_lock, flags);
5572 	dm->vblank_workqueue->dm = dm;
5573 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5574 	dm->vblank_workqueue->enable = enable;
5575 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5576 	schedule_work(&dm->vblank_workqueue->mall_work);
5577 #endif
5578 
5579 	return 0;
5580 }
5581 
5582 static int dm_enable_vblank(struct drm_crtc *crtc)
5583 {
5584 	return dm_set_vblank(crtc, true);
5585 }
5586 
5587 static void dm_disable_vblank(struct drm_crtc *crtc)
5588 {
5589 	dm_set_vblank(crtc, false);
5590 }
5591 
5592 /* Only the options currently available for the driver are implemented */
5593 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5594 	.reset = dm_crtc_reset_state,
5595 	.destroy = amdgpu_dm_crtc_destroy,
5596 	.set_config = drm_atomic_helper_set_config,
5597 	.page_flip = drm_atomic_helper_page_flip,
5598 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5599 	.atomic_destroy_state = dm_crtc_destroy_state,
5600 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5601 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5602 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5603 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5604 	.enable_vblank = dm_enable_vblank,
5605 	.disable_vblank = dm_disable_vblank,
5606 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5607 };
5608 
5609 static enum drm_connector_status
5610 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5611 {
5612 	bool connected;
5613 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5614 
5615 	/*
5616 	 * Notes:
5617 	 * 1. This interface is NOT called in context of HPD irq.
5618 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5619 	 * which makes it a bad place for *any* MST-related activity.
5620 	 */
5621 
5622 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5623 	    !aconnector->fake_enable)
5624 		connected = (aconnector->dc_sink != NULL);
5625 	else
5626 		connected = (aconnector->base.force == DRM_FORCE_ON);
5627 
5628 	update_subconnector_property(aconnector);
5629 
5630 	return (connected ? connector_status_connected :
5631 			connector_status_disconnected);
5632 }
5633 
5634 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5635 					    struct drm_connector_state *connector_state,
5636 					    struct drm_property *property,
5637 					    uint64_t val)
5638 {
5639 	struct drm_device *dev = connector->dev;
5640 	struct amdgpu_device *adev = drm_to_adev(dev);
5641 	struct dm_connector_state *dm_old_state =
5642 		to_dm_connector_state(connector->state);
5643 	struct dm_connector_state *dm_new_state =
5644 		to_dm_connector_state(connector_state);
5645 
5646 	int ret = -EINVAL;
5647 
5648 	if (property == dev->mode_config.scaling_mode_property) {
5649 		enum amdgpu_rmx_type rmx_type;
5650 
5651 		switch (val) {
5652 		case DRM_MODE_SCALE_CENTER:
5653 			rmx_type = RMX_CENTER;
5654 			break;
5655 		case DRM_MODE_SCALE_ASPECT:
5656 			rmx_type = RMX_ASPECT;
5657 			break;
5658 		case DRM_MODE_SCALE_FULLSCREEN:
5659 			rmx_type = RMX_FULL;
5660 			break;
5661 		case DRM_MODE_SCALE_NONE:
5662 		default:
5663 			rmx_type = RMX_OFF;
5664 			break;
5665 		}
5666 
5667 		if (dm_old_state->scaling == rmx_type)
5668 			return 0;
5669 
5670 		dm_new_state->scaling = rmx_type;
5671 		ret = 0;
5672 	} else if (property == adev->mode_info.underscan_hborder_property) {
5673 		dm_new_state->underscan_hborder = val;
5674 		ret = 0;
5675 	} else if (property == adev->mode_info.underscan_vborder_property) {
5676 		dm_new_state->underscan_vborder = val;
5677 		ret = 0;
5678 	} else if (property == adev->mode_info.underscan_property) {
5679 		dm_new_state->underscan_enable = val;
5680 		ret = 0;
5681 	} else if (property == adev->mode_info.abm_level_property) {
5682 		dm_new_state->abm_level = val;
5683 		ret = 0;
5684 	}
5685 
5686 	return ret;
5687 }
5688 
5689 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5690 					    const struct drm_connector_state *state,
5691 					    struct drm_property *property,
5692 					    uint64_t *val)
5693 {
5694 	struct drm_device *dev = connector->dev;
5695 	struct amdgpu_device *adev = drm_to_adev(dev);
5696 	struct dm_connector_state *dm_state =
5697 		to_dm_connector_state(state);
5698 	int ret = -EINVAL;
5699 
5700 	if (property == dev->mode_config.scaling_mode_property) {
5701 		switch (dm_state->scaling) {
5702 		case RMX_CENTER:
5703 			*val = DRM_MODE_SCALE_CENTER;
5704 			break;
5705 		case RMX_ASPECT:
5706 			*val = DRM_MODE_SCALE_ASPECT;
5707 			break;
5708 		case RMX_FULL:
5709 			*val = DRM_MODE_SCALE_FULLSCREEN;
5710 			break;
5711 		case RMX_OFF:
5712 		default:
5713 			*val = DRM_MODE_SCALE_NONE;
5714 			break;
5715 		}
5716 		ret = 0;
5717 	} else if (property == adev->mode_info.underscan_hborder_property) {
5718 		*val = dm_state->underscan_hborder;
5719 		ret = 0;
5720 	} else if (property == adev->mode_info.underscan_vborder_property) {
5721 		*val = dm_state->underscan_vborder;
5722 		ret = 0;
5723 	} else if (property == adev->mode_info.underscan_property) {
5724 		*val = dm_state->underscan_enable;
5725 		ret = 0;
5726 	} else if (property == adev->mode_info.abm_level_property) {
5727 		*val = dm_state->abm_level;
5728 		ret = 0;
5729 	}
5730 
5731 	return ret;
5732 }
5733 
5734 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5735 {
5736 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5737 
5738 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5739 }
5740 
5741 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5742 {
5743 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5744 	const struct dc_link *link = aconnector->dc_link;
5745 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5746 	struct amdgpu_display_manager *dm = &adev->dm;
5747 
5748 	/*
5749 	 * Only call this if mst_mgr was initialized earlier, since that is
5750 	 * not done for all connector types.
5751 	 */
5752 	if (aconnector->mst_mgr.dev)
5753 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5754 
5755 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5756 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5757 
5758 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5759 	    link->type != dc_connection_none &&
5760 	    dm->backlight_dev) {
5761 		backlight_device_unregister(dm->backlight_dev);
5762 		dm->backlight_dev = NULL;
5763 	}
5764 #endif
5765 
5766 	if (aconnector->dc_em_sink)
5767 		dc_sink_release(aconnector->dc_em_sink);
5768 	aconnector->dc_em_sink = NULL;
5769 	if (aconnector->dc_sink)
5770 		dc_sink_release(aconnector->dc_sink);
5771 	aconnector->dc_sink = NULL;
5772 
5773 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5774 	drm_connector_unregister(connector);
5775 	drm_connector_cleanup(connector);
5776 	if (aconnector->i2c) {
5777 		i2c_del_adapter(&aconnector->i2c->base);
5778 		kfree(aconnector->i2c);
5779 	}
5780 	kfree(aconnector->dm_dp_aux.aux.name);
5781 
5782 	kfree(connector);
5783 }
5784 
5785 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5786 {
5787 	struct dm_connector_state *state =
5788 		to_dm_connector_state(connector->state);
5789 
5790 	if (connector->state)
5791 		__drm_atomic_helper_connector_destroy_state(connector->state);
5792 
5793 	kfree(state);
5794 
5795 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5796 
5797 	if (state) {
5798 		state->scaling = RMX_OFF;
5799 		state->underscan_enable = false;
5800 		state->underscan_hborder = 0;
5801 		state->underscan_vborder = 0;
5802 		state->base.max_requested_bpc = 8;
5803 		state->vcpi_slots = 0;
5804 		state->pbn = 0;
5805 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5806 			state->abm_level = amdgpu_dm_abm_level;
5807 
5808 		__drm_atomic_helper_connector_reset(connector, &state->base);
5809 	}
5810 }
5811 
5812 struct drm_connector_state *
5813 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5814 {
5815 	struct dm_connector_state *state =
5816 		to_dm_connector_state(connector->state);
5817 
5818 	struct dm_connector_state *new_state =
5819 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5820 
5821 	if (!new_state)
5822 		return NULL;
5823 
5824 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5825 
5826 	new_state->freesync_capable = state->freesync_capable;
5827 	new_state->abm_level = state->abm_level;
5828 	new_state->scaling = state->scaling;
5829 	new_state->underscan_enable = state->underscan_enable;
5830 	new_state->underscan_hborder = state->underscan_hborder;
5831 	new_state->underscan_vborder = state->underscan_vborder;
5832 	new_state->vcpi_slots = state->vcpi_slots;
5833 	new_state->pbn = state->pbn;
5834 	return &new_state->base;
5835 }
5836 
5837 static int
5838 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5839 {
5840 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5841 		to_amdgpu_dm_connector(connector);
5842 	int r;
5843 
5844 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5845 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5846 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5847 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5848 		if (r)
5849 			return r;
5850 	}
5851 
5852 #if defined(CONFIG_DEBUG_FS)
5853 	connector_debugfs_init(amdgpu_dm_connector);
5854 #endif
5855 
5856 	return 0;
5857 }
5858 
5859 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5860 	.reset = amdgpu_dm_connector_funcs_reset,
5861 	.detect = amdgpu_dm_connector_detect,
5862 	.fill_modes = drm_helper_probe_single_connector_modes,
5863 	.destroy = amdgpu_dm_connector_destroy,
5864 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5865 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5866 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5867 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5868 	.late_register = amdgpu_dm_connector_late_register,
5869 	.early_unregister = amdgpu_dm_connector_unregister
5870 };
5871 
5872 static int get_modes(struct drm_connector *connector)
5873 {
5874 	return amdgpu_dm_connector_get_modes(connector);
5875 }
5876 
5877 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5878 {
5879 	struct dc_sink_init_data init_params = {
5880 			.link = aconnector->dc_link,
5881 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5882 	};
5883 	struct edid *edid;
5884 
5885 	if (!aconnector->base.edid_blob_ptr) {
5886 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5887 				aconnector->base.name);
5888 
5889 		aconnector->base.force = DRM_FORCE_OFF;
5890 		aconnector->base.override_edid = false;
5891 		return;
5892 	}
5893 
5894 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5895 
5896 	aconnector->edid = edid;
5897 
5898 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5899 		aconnector->dc_link,
5900 		(uint8_t *)edid,
5901 		(edid->extensions + 1) * EDID_LENGTH,
5902 		&init_params);
5903 
5904 	if (aconnector->base.force == DRM_FORCE_ON) {
5905 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5906 		aconnector->dc_link->local_sink :
5907 		aconnector->dc_em_sink;
5908 		dc_sink_retain(aconnector->dc_sink);
5909 	}
5910 }
5911 
5912 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5913 {
5914 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5915 
5916 	/*
5917 	 * In case of a headless boot with force-on for a DP managed connector,
5918 	 * these settings have to be != 0 to get an initial modeset
5919 	 */
5920 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5921 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5922 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5923 	}
5924 
5926 	aconnector->base.override_edid = true;
5927 	create_eml_sink(aconnector);
5928 }
5929 
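/*
 * Create a stream and validate it with DC, retrying at progressively lower
 * color depth (down to 6 bpc) when validation fails, e.g. because the link
 * cannot carry the mode at the requested depth.
 */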
5930 static struct dc_stream_state *
5931 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5932 				const struct drm_display_mode *drm_mode,
5933 				const struct dm_connector_state *dm_state,
5934 				const struct dc_stream_state *old_stream)
5935 {
5936 	struct drm_connector *connector = &aconnector->base;
5937 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5938 	struct dc_stream_state *stream;
5939 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5940 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5941 	enum dc_status dc_result = DC_OK;
5942 
5943 	do {
5944 		stream = create_stream_for_sink(aconnector, drm_mode,
5945 						dm_state, old_stream,
5946 						requested_bpc);
5947 		if (stream == NULL) {
5948 			DRM_ERROR("Failed to create stream for sink!\n");
5949 			break;
5950 		}
5951 
5952 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5953 
5954 		if (dc_result != DC_OK) {
5955 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5956 				      drm_mode->hdisplay,
5957 				      drm_mode->vdisplay,
5958 				      drm_mode->clock,
5959 				      dc_result,
5960 				      dc_status_to_str(dc_result));
5961 
5962 			dc_stream_release(stream);
5963 			stream = NULL;
5964 			requested_bpc -= 2; /* lower bpc to retry validation */
5965 		}
5966 
5967 	} while (stream == NULL && requested_bpc >= 6);
5968 
5969 	return stream;
5970 }
5971 
5972 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5973 				   struct drm_display_mode *mode)
5974 {
5975 	int result = MODE_ERROR;
5976 	struct dc_sink *dc_sink;
5977 	/* TODO: Unhardcode stream count */
5978 	struct dc_stream_state *stream;
5979 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5980 
5981 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5982 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5983 		return result;
5984 
5985 	/*
5986 	 * Only run this the first time mode_valid is called to initialize
5987 	 * EDID mgmt
5988 	 */
5989 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5990 		!aconnector->dc_em_sink)
5991 		handle_edid_mgmt(aconnector);
5992 
5993 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5994 
5995 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5996 				aconnector->base.force != DRM_FORCE_ON) {
5997 		DRM_ERROR("dc_sink is NULL!\n");
5998 		goto fail;
5999 	}
6000 
6001 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6002 	if (stream) {
6003 		dc_stream_release(stream);
6004 		result = MODE_OK;
6005 	}
6006 
6007 fail:
6008 	/* TODO: error handling */
6009 	return result;
6010 }
6011 
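/*
 * Pack the connector's HDR output metadata into the info packet layout DC
 * expects: an HDMI Dynamic Range and Mastering (DRM) infoframe for HDMI, or
 * the equivalent payload wrapped in an SDP header for DP/eDP.
 */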
6012 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6013 				struct dc_info_packet *out)
6014 {
6015 	struct hdmi_drm_infoframe frame;
6016 	unsigned char buf[30]; /* 26 + 4 */
6017 	ssize_t len;
6018 	int ret, i;
6019 
6020 	memset(out, 0, sizeof(*out));
6021 
6022 	if (!state->hdr_output_metadata)
6023 		return 0;
6024 
6025 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6026 	if (ret)
6027 		return ret;
6028 
6029 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6030 	if (len < 0)
6031 		return (int)len;
6032 
6033 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6034 	if (len != 30)
6035 		return -EINVAL;
6036 
6037 	/* Prepare the infopacket for DC. */
6038 	switch (state->connector->connector_type) {
6039 	case DRM_MODE_CONNECTOR_HDMIA:
6040 		out->hb0 = 0x87; /* type */
6041 		out->hb1 = 0x01; /* version */
6042 		out->hb2 = 0x1A; /* length */
6043 		out->sb[0] = buf[3]; /* checksum */
6044 		i = 1;
6045 		break;
6046 
6047 	case DRM_MODE_CONNECTOR_DisplayPort:
6048 	case DRM_MODE_CONNECTOR_eDP:
6049 		out->hb0 = 0x00; /* sdp id, zero */
6050 		out->hb1 = 0x87; /* type */
6051 		out->hb2 = 0x1D; /* payload len - 1 */
6052 		out->hb3 = (0x13 << 2); /* sdp version */
6053 		out->sb[0] = 0x01; /* version */
6054 		out->sb[1] = 0x1A; /* length */
6055 		i = 2;
6056 		break;
6057 
6058 	default:
6059 		return -EINVAL;
6060 	}
6061 
6062 	memcpy(&out->sb[i], &buf[4], 26);
6063 	out->valid = true;
6064 
6065 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6066 		       sizeof(out->sb), false);
6067 
6068 	return 0;
6069 }
6070 
6071 static bool
6072 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6073 			  const struct drm_connector_state *new_state)
6074 {
6075 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6076 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6077 
6078 	if (old_blob != new_blob) {
6079 		if (old_blob && new_blob &&
6080 		    old_blob->length == new_blob->length)
6081 			return memcmp(old_blob->data, new_blob->data,
6082 				      old_blob->length);
6083 
6084 		return true;
6085 	}
6086 
6087 	return false;
6088 }
6089 
6090 static int
6091 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6092 				 struct drm_atomic_state *state)
6093 {
6094 	struct drm_connector_state *new_con_state =
6095 		drm_atomic_get_new_connector_state(state, conn);
6096 	struct drm_connector_state *old_con_state =
6097 		drm_atomic_get_old_connector_state(state, conn);
6098 	struct drm_crtc *crtc = new_con_state->crtc;
6099 	struct drm_crtc_state *new_crtc_state;
6100 	int ret;
6101 
6102 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6103 
6104 	if (!crtc)
6105 		return 0;
6106 
6107 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6108 		struct dc_info_packet hdr_infopacket;
6109 
6110 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6111 		if (ret)
6112 			return ret;
6113 
6114 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6115 		if (IS_ERR(new_crtc_state))
6116 			return PTR_ERR(new_crtc_state);
6117 
6118 		/*
6119 		 * DC considers the stream backends changed if the
6120 		 * static metadata changes. Forcing the modeset also
6121 		 * gives a simple way for userspace to switch from
6122 		 * 8bpc to 10bpc when setting the metadata to enter
6123 		 * or exit HDR.
6124 		 *
6125 		 * Changing the static metadata after it's been
6126 		 * set is permissible, however. So only force a
6127 		 * modeset if we're entering or exiting HDR.
6128 		 */
6129 		new_crtc_state->mode_changed =
6130 			!old_con_state->hdr_output_metadata ||
6131 			!new_con_state->hdr_output_metadata;
6132 	}
6133 
6134 	return 0;
6135 }
6136 
6137 static const struct drm_connector_helper_funcs
6138 amdgpu_dm_connector_helper_funcs = {
6139 	/*
6140 	 * If a second, bigger display is hotplugged in FB console mode, its
6141 	 * higher resolution modes are filtered out by drm_mode_validate_size()
6142 	 * and go missing once the user starts lightdm. So renew the mode list
6143 	 * in the get_modes callback instead of just returning the mode count.
6144 	 */
6145 	.get_modes = get_modes,
6146 	.mode_valid = amdgpu_dm_connector_mode_valid,
6147 	.atomic_check = amdgpu_dm_connector_atomic_check,
6148 };
6149 
6150 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6151 {
6152 }
6153 
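/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit; planes not touched by the atomic state passed validation earlier
 * and still count as enabled.
 */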
6154 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6155 {
6156 	struct drm_atomic_state *state = new_crtc_state->state;
6157 	struct drm_plane *plane;
6158 	int num_active = 0;
6159 
6160 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6161 		struct drm_plane_state *new_plane_state;
6162 
6163 		/* Cursor planes are "fake". */
6164 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6165 			continue;
6166 
6167 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6168 
6169 		if (!new_plane_state) {
6170 			/*
6171 			 * The plane is enabled on the CRTC and hasn't changed
6172 			 * state. This means that it previously passed
6173 			 * validation and is therefore enabled.
6174 			 */
6175 			num_active += 1;
6176 			continue;
6177 		}
6178 
6179 		/* We need a framebuffer to be considered enabled. */
6180 		num_active += (new_plane_state->fb != NULL);
6181 	}
6182 
6183 	return num_active;
6184 }
6185 
6186 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6187 					 struct drm_crtc_state *new_crtc_state)
6188 {
6189 	struct dm_crtc_state *dm_new_crtc_state =
6190 		to_dm_crtc_state(new_crtc_state);
6191 
6192 	dm_new_crtc_state->active_planes = 0;
6193 
6194 	if (!dm_new_crtc_state->stream)
6195 		return;
6196 
6197 	dm_new_crtc_state->active_planes =
6198 		count_crtc_active_planes(new_crtc_state);
6199 }
6200 
6201 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6202 				       struct drm_atomic_state *state)
6203 {
6204 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6205 									  crtc);
6206 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6207 	struct dc *dc = adev->dm.dc;
6208 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6209 	int ret = -EINVAL;
6210 
6211 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6212 
6213 	dm_update_crtc_active_planes(crtc, crtc_state);
6214 
6215 	if (unlikely(!dm_crtc_state->stream &&
6216 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6217 		WARN_ON(1);
6218 		return ret;
6219 	}
6220 
6221 	/*
6222 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6223 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6224 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6225 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6226 	 */
6227 	if (crtc_state->enable &&
6228 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6229 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6230 		return -EINVAL;
6231 	}
6232 
6233 	/* In some use cases, like reset, no stream is attached */
6234 	if (!dm_crtc_state->stream)
6235 		return 0;
6236 
6237 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6238 		return 0;
6239 
6240 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6241 	return ret;
6242 }
6243 
6244 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6245 				      const struct drm_display_mode *mode,
6246 				      struct drm_display_mode *adjusted_mode)
6247 {
6248 	return true;
6249 }
6250 
6251 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6252 	.disable = dm_crtc_helper_disable,
6253 	.atomic_check = dm_crtc_helper_atomic_check,
6254 	.mode_fixup = dm_crtc_helper_mode_fixup,
6255 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6256 };
6257 
6258 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6259 {
6260 
6261 }
6262 
6263 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6264 {
6265 	switch (display_color_depth) {
6266 	case COLOR_DEPTH_666:
6267 		return 6;
6268 	case COLOR_DEPTH_888:
6269 		return 8;
6270 	case COLOR_DEPTH_101010:
6271 		return 10;
6272 	case COLOR_DEPTH_121212:
6273 		return 12;
6274 	case COLOR_DEPTH_141414:
6275 		return 14;
6276 	case COLOR_DEPTH_161616:
6277 		return 16;
6278 	default:
6279 		break;
6280 	}
6281 	return 0;
6282 }
6283 
6284 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6285 					  struct drm_crtc_state *crtc_state,
6286 					  struct drm_connector_state *conn_state)
6287 {
6288 	struct drm_atomic_state *state = crtc_state->state;
6289 	struct drm_connector *connector = conn_state->connector;
6290 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6291 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6292 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6293 	struct drm_dp_mst_topology_mgr *mst_mgr;
6294 	struct drm_dp_mst_port *mst_port;
6295 	enum dc_color_depth color_depth;
6296 	int clock, bpp = 0;
6297 	bool is_y420 = false;
6298 
6299 	if (!aconnector->port || !aconnector->dc_sink)
6300 		return 0;
6301 
6302 	mst_port = aconnector->port;
6303 	mst_mgr = &aconnector->mst_port->mst_mgr;
6304 
6305 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6306 		return 0;
6307 
6308 	if (!state->duplicated) {
6309 		int max_bpc = conn_state->max_requested_bpc;
6310 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6311 				aconnector->force_yuv420_output;
6312 		color_depth = convert_color_depth_from_display_info(connector,
6313 								    is_y420,
6314 								    max_bpc);
6315 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6316 		clock = adjusted_mode->clock;
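		/*
		 * Without DSC, drm_dp_calc_pbn_mode() takes whole bits per
		 * pixel (component depth * 3 here) and the mode clock in kHz.
		 */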
6317 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6318 	}
6319 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6320 									   mst_mgr,
6321 									   mst_port,
6322 									   dm_new_connector_state->pbn,
6323 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6324 	if (dm_new_connector_state->vcpi_slots < 0) {
6325 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6326 		return dm_new_connector_state->vcpi_slots;
6327 	}
6328 	return 0;
6329 }
6330 
6331 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6332 	.disable = dm_encoder_helper_disable,
6333 	.atomic_check = dm_encoder_helper_atomic_check
6334 };
6335 
6336 #if defined(CONFIG_DRM_AMD_DC_DCN)
6337 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6338 					    struct dc_state *dc_state)
6339 {
6340 	struct dc_stream_state *stream = NULL;
6341 	struct drm_connector *connector;
6342 	struct drm_connector_state *new_con_state, *old_con_state;
6343 	struct amdgpu_dm_connector *aconnector;
6344 	struct dm_connector_state *dm_conn_state;
6345 	int i, j, clock, bpp;
6346 	int vcpi, pbn_div, pbn = 0;
6347 
6348 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6349 
6350 		aconnector = to_amdgpu_dm_connector(connector);
6351 
6352 		if (!aconnector->port)
6353 			continue;
6354 
6355 		if (!new_con_state || !new_con_state->crtc)
6356 			continue;
6357 
6358 		dm_conn_state = to_dm_connector_state(new_con_state);
6359 
6360 		for (j = 0; j < dc_state->stream_count; j++) {
6361 			stream = dc_state->streams[j];
6362 			if (!stream)
6363 				continue;
6364 
6365 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6366 				break;
6367 
6368 			stream = NULL;
6369 		}
6370 
6371 		if (!stream)
6372 			continue;
6373 
6374 		if (stream->timing.flags.DSC != 1) {
6375 			drm_dp_mst_atomic_enable_dsc(state,
6376 						     aconnector->port,
6377 						     dm_conn_state->pbn,
6378 						     0,
6379 						     false);
6380 			continue;
6381 		}
6382 
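		/*
		 * With DSC, dsc_cfg.bits_per_pixel is in units of 1/16 bpp,
		 * which drm_dp_calc_pbn_mode() expects when its dsc argument
		 * is true; pix_clk_100hz / 10 converts the clock to kHz.
		 */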
6383 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6384 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6385 		clock = stream->timing.pix_clk_100hz / 10;
6386 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6387 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6388 						    aconnector->port,
6389 						    pbn, pbn_div,
6390 						    true);
6391 		if (vcpi < 0)
6392 			return vcpi;
6393 
6394 		dm_conn_state->pbn = pbn;
6395 		dm_conn_state->vcpi_slots = vcpi;
6396 	}
6397 	return 0;
6398 }
6399 #endif
6400 
6401 static void dm_drm_plane_reset(struct drm_plane *plane)
6402 {
6403 	struct dm_plane_state *amdgpu_state = NULL;
6404 
6405 	if (plane->state)
6406 		plane->funcs->atomic_destroy_state(plane, plane->state);
6407 
6408 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6409 	WARN_ON(amdgpu_state == NULL);
6410 
6411 	if (amdgpu_state)
6412 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6413 }
6414 
6415 static struct drm_plane_state *
6416 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6417 {
6418 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6419 
6420 	old_dm_plane_state = to_dm_plane_state(plane->state);
6421 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6422 	if (!dm_plane_state)
6423 		return NULL;
6424 
6425 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6426 
6427 	if (old_dm_plane_state->dc_state) {
6428 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6429 		dc_plane_state_retain(dm_plane_state->dc_state);
6430 	}
6431 
6432 	return &dm_plane_state->base;
6433 }
6434 
6435 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6436 				struct drm_plane_state *state)
6437 {
6438 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6439 
6440 	if (dm_plane_state->dc_state)
6441 		dc_plane_state_release(dm_plane_state->dc_state);
6442 
6443 	drm_atomic_helper_plane_destroy_state(plane, state);
6444 }
6445 
6446 static const struct drm_plane_funcs dm_plane_funcs = {
6447 	.update_plane	= drm_atomic_helper_update_plane,
6448 	.disable_plane	= drm_atomic_helper_disable_plane,
6449 	.destroy	= drm_primary_helper_destroy,
6450 	.reset = dm_drm_plane_reset,
6451 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6452 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6453 	.format_mod_supported = dm_plane_format_mod_supported,
6454 };
6455 
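/*
 * Pin the new framebuffer's BO into a scanout-capable domain, make it GPU
 * accessible and record its address in the amdgpu_framebuffer so the flip
 * can be programmed; cleanup_fb() below undoes the pin.
 */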
6456 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6457 				      struct drm_plane_state *new_state)
6458 {
6459 	struct amdgpu_framebuffer *afb;
6460 	struct drm_gem_object *obj;
6461 	struct amdgpu_device *adev;
6462 	struct amdgpu_bo *rbo;
6463 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6464 	struct list_head list;
6465 	struct ttm_validate_buffer tv;
6466 	struct ww_acquire_ctx ticket;
6467 	uint32_t domain;
6468 	int r;
6469 
6470 	if (!new_state->fb) {
6471 		DRM_DEBUG_DRIVER("No FB bound\n");
6472 		return 0;
6473 	}
6474 
6475 	afb = to_amdgpu_framebuffer(new_state->fb);
6476 	obj = new_state->fb->obj[0];
6477 	rbo = gem_to_amdgpu_bo(obj);
6478 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6479 	INIT_LIST_HEAD(&list);
6480 
6481 	tv.bo = &rbo->tbo;
6482 	tv.num_shared = 1;
6483 	list_add(&tv.head, &list);
6484 
6485 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6486 	if (r) {
6487 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6488 		return r;
6489 	}
6490 
6491 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6492 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6493 	else
6494 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6495 
6496 	r = amdgpu_bo_pin(rbo, domain);
6497 	if (unlikely(r != 0)) {
6498 		if (r != -ERESTARTSYS)
6499 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6500 		ttm_eu_backoff_reservation(&ticket, &list);
6501 		return r;
6502 	}
6503 
6504 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6505 	if (unlikely(r != 0)) {
6506 		amdgpu_bo_unpin(rbo);
6507 		ttm_eu_backoff_reservation(&ticket, &list);
6508 		DRM_ERROR("%p bind failed\n", rbo);
6509 		return r;
6510 	}
6511 
6512 	ttm_eu_backoff_reservation(&ticket, &list);
6513 
6514 	afb->address = amdgpu_bo_gpu_offset(rbo);
6515 
6516 	amdgpu_bo_ref(rbo);
6517 
6518 	/*
6519 	 * We don't do surface updates on planes that have been newly created,
6520 	 * but we also don't have the afb->address during atomic check.
6521 	 *
6522 	 * Fill in buffer attributes depending on the address here, but only on
6523 	 * newly created planes since they're not being used by DC yet and this
6524 	 * won't modify global state.
6525 	 */
6526 	dm_plane_state_old = to_dm_plane_state(plane->state);
6527 	dm_plane_state_new = to_dm_plane_state(new_state);
6528 
6529 	if (dm_plane_state_new->dc_state &&
6530 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6531 		struct dc_plane_state *plane_state =
6532 			dm_plane_state_new->dc_state;
6533 		bool force_disable_dcc = !plane_state->dcc.enable;
6534 
6535 		fill_plane_buffer_attributes(
6536 			adev, afb, plane_state->format, plane_state->rotation,
6537 			afb->tiling_flags,
6538 			&plane_state->tiling_info, &plane_state->plane_size,
6539 			&plane_state->dcc, &plane_state->address,
6540 			afb->tmz_surface, force_disable_dcc);
6541 	}
6542 
6543 	return 0;
6544 }
6545 
6546 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6547 				       struct drm_plane_state *old_state)
6548 {
6549 	struct amdgpu_bo *rbo;
6550 	int r;
6551 
6552 	if (!old_state->fb)
6553 		return;
6554 
6555 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6556 	r = amdgpu_bo_reserve(rbo, false);
6557 	if (unlikely(r)) {
6558 		DRM_ERROR("failed to reserve rbo before unpin\n");
6559 		return;
6560 	}
6561 
6562 	amdgpu_bo_unpin(rbo);
6563 	amdgpu_bo_unreserve(rbo);
6564 	amdgpu_bo_unref(&rbo);
6565 }
6566 
6567 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6568 				       struct drm_crtc_state *new_crtc_state)
6569 {
6570 	struct drm_framebuffer *fb = state->fb;
6571 	int min_downscale, max_upscale;
6572 	int min_scale = 0;
6573 	int max_scale = INT_MAX;
6574 
6575 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6576 	if (fb && state->crtc) {
6577 		/* Validate viewport to cover the case when only the position changes */
6578 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6579 			int viewport_width = state->crtc_w;
6580 			int viewport_height = state->crtc_h;
6581 
6582 			if (state->crtc_x < 0)
6583 				viewport_width += state->crtc_x;
6584 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6585 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6586 
6587 			if (state->crtc_y < 0)
6588 				viewport_height += state->crtc_y;
6589 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6590 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6591 
6592 			/* If the plane is completely outside of the screen, viewport_width
6593 			 * and/or viewport_height will be negative, which still satisfies
6594 			 * the condition below and thus covers those cases as well.
6595 			 * x2 for width is because of pipe-split.
6596 			 */
6597 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6598 				return -EINVAL;
6599 		}
6600 
6601 		/* Get min/max allowed scaling factors from plane caps. */
6602 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6603 					     &min_downscale, &max_upscale);
6604 		/*
6605 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6606 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6607 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6608 		 */
6609 		min_scale = (1000 << 16) / max_upscale;
6610 		max_scale = (1000 << 16) / min_downscale;
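		/*
		 * E.g. plane caps of max_upscale == 16000 (16x) and
		 * min_downscale == 250 (1/4x) give min_scale = 65536000 /
		 * 16000 = 4096 (0.0625 in 16.16) and max_scale = 65536000 /
		 * 250 = 262144 (4.0 in 16.16).
		 */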
6611 	}
6612 
6613 	return drm_atomic_helper_check_plane_state(
6614 		state, new_crtc_state, min_scale, max_scale, true, true);
6615 }
6616 
6617 static int dm_plane_atomic_check(struct drm_plane *plane,
6618 				 struct drm_plane_state *state)
6619 {
6620 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6621 	struct dc *dc = adev->dm.dc;
6622 	struct dm_plane_state *dm_plane_state;
6623 	struct dc_scaling_info scaling_info;
6624 	struct drm_crtc_state *new_crtc_state;
6625 	int ret;
6626 
6627 	trace_amdgpu_dm_plane_atomic_check(state);
6628 
6629 	dm_plane_state = to_dm_plane_state(state);
6630 
6631 	if (!dm_plane_state->dc_state)
6632 		return 0;
6633 
6634 	new_crtc_state =
6635 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6636 	if (!new_crtc_state)
6637 		return -EINVAL;
6638 
6639 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6640 	if (ret)
6641 		return ret;
6642 
6643 	ret = fill_dc_scaling_info(state, &scaling_info);
6644 	if (ret)
6645 		return ret;
6646 
6647 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6648 		return 0;
6649 
6650 	return -EINVAL;
6651 }
6652 
6653 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6654 				       struct drm_plane_state *new_plane_state)
6655 {
6656 	/* Only support async updates on cursor planes. */
6657 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6658 		return -EINVAL;
6659 
6660 	return 0;
6661 }
6662 
6663 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6664 					 struct drm_plane_state *new_state)
6665 {
6666 	struct drm_plane_state *old_state =
6667 		drm_atomic_get_old_plane_state(new_state->state, plane);
6668 
6669 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6670 
6671 	swap(plane->state->fb, new_state->fb);
6672 
6673 	plane->state->src_x = new_state->src_x;
6674 	plane->state->src_y = new_state->src_y;
6675 	plane->state->src_w = new_state->src_w;
6676 	plane->state->src_h = new_state->src_h;
6677 	plane->state->crtc_x = new_state->crtc_x;
6678 	plane->state->crtc_y = new_state->crtc_y;
6679 	plane->state->crtc_w = new_state->crtc_w;
6680 	plane->state->crtc_h = new_state->crtc_h;
6681 
6682 	handle_cursor_update(plane, old_state);
6683 }
6684 
6685 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6686 	.prepare_fb = dm_plane_helper_prepare_fb,
6687 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6688 	.atomic_check = dm_plane_atomic_check,
6689 	.atomic_async_check = dm_plane_atomic_async_check,
6690 	.atomic_async_update = dm_plane_atomic_async_update
6691 };
6692 
6693 /*
6694  * TODO: these are currently initialized to rgb formats only.
6695  * For future use cases we should either initialize them dynamically based on
6696  * plane capabilities, or initialize this array to all formats, so internal drm
6697  * check will succeed, and let DC implement proper check
6698  */
6699 static const uint32_t rgb_formats[] = {
6700 	DRM_FORMAT_XRGB8888,
6701 	DRM_FORMAT_ARGB8888,
6702 	DRM_FORMAT_RGBA8888,
6703 	DRM_FORMAT_XRGB2101010,
6704 	DRM_FORMAT_XBGR2101010,
6705 	DRM_FORMAT_ARGB2101010,
6706 	DRM_FORMAT_ABGR2101010,
6707 	DRM_FORMAT_XBGR8888,
6708 	DRM_FORMAT_ABGR8888,
6709 	DRM_FORMAT_RGB565,
6710 };
6711 
6712 static const uint32_t overlay_formats[] = {
6713 	DRM_FORMAT_XRGB8888,
6714 	DRM_FORMAT_ARGB8888,
6715 	DRM_FORMAT_RGBA8888,
6716 	DRM_FORMAT_XBGR8888,
6717 	DRM_FORMAT_ABGR8888,
6718 	DRM_FORMAT_RGB565
6719 };
6720 
6721 static const u32 cursor_formats[] = {
6722 	DRM_FORMAT_ARGB8888
6723 };
6724 
6725 static int get_plane_formats(const struct drm_plane *plane,
6726 			     const struct dc_plane_cap *plane_cap,
6727 			     uint32_t *formats, int max_formats)
6728 {
6729 	int i, num_formats = 0;
6730 
6731 	/*
6732 	 * TODO: Query support for each group of formats directly from
6733 	 * DC plane caps. This will require adding more formats to the
6734 	 * caps list.
6735 	 */
6736 
6737 	switch (plane->type) {
6738 	case DRM_PLANE_TYPE_PRIMARY:
6739 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6740 			if (num_formats >= max_formats)
6741 				break;
6742 
6743 			formats[num_formats++] = rgb_formats[i];
6744 		}
6745 
6746 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6747 			formats[num_formats++] = DRM_FORMAT_NV12;
6748 		if (plane_cap && plane_cap->pixel_format_support.p010)
6749 			formats[num_formats++] = DRM_FORMAT_P010;
6750 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6751 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6752 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6753 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6754 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6755 		}
6756 		break;
6757 
6758 	case DRM_PLANE_TYPE_OVERLAY:
6759 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6760 			if (num_formats >= max_formats)
6761 				break;
6762 
6763 			formats[num_formats++] = overlay_formats[i];
6764 		}
6765 		break;
6766 
6767 	case DRM_PLANE_TYPE_CURSOR:
6768 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6769 			if (num_formats >= max_formats)
6770 				break;
6771 
6772 			formats[num_formats++] = cursor_formats[i];
6773 		}
6774 		break;
6775 	}
6776 
6777 	return num_formats;
6778 }
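
/*
 * Usage sketch (editorial, not part of the driver): callers provide a
 * bounded array and use the returned count, as amdgpu_dm_plane_init()
 * does below:
 *
 *	uint32_t formats[32];
 *	int num_formats = get_plane_formats(plane, plane_cap, formats,
 *					    ARRAY_SIZE(formats));
 *
 *	drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
 *				 &dm_plane_funcs, formats, num_formats,
 *				 modifiers, plane->type, NULL);
 *
 * Because num_formats is clamped to max_formats, the array can never
 * overflow even if more format groups are added to the switch above.
 */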
6779 
6780 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6781 				struct drm_plane *plane,
6782 				unsigned long possible_crtcs,
6783 				const struct dc_plane_cap *plane_cap)
6784 {
6785 	uint32_t formats[32];
6786 	int num_formats;
6787 	int res = -EPERM;
6788 	unsigned int supported_rotations;
6789 	uint64_t *modifiers = NULL;
6790 
6791 	num_formats = get_plane_formats(plane, plane_cap, formats,
6792 					ARRAY_SIZE(formats));
6793 
6794 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6795 	if (res)
6796 		return res;
6797 
6798 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6799 				       &dm_plane_funcs, formats, num_formats,
6800 				       modifiers, plane->type, NULL);
6801 	kfree(modifiers);
6802 	if (res)
6803 		return res;
6804 
6805 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6806 	    plane_cap && plane_cap->per_pixel_alpha) {
6807 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6808 					  BIT(DRM_MODE_BLEND_PREMULTI);
6809 
6810 		drm_plane_create_alpha_property(plane);
6811 		drm_plane_create_blend_mode_property(plane, blend_caps);
6812 	}
6813 
6814 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6815 	    plane_cap &&
6816 	    (plane_cap->pixel_format_support.nv12 ||
6817 	     plane_cap->pixel_format_support.p010)) {
6818 		/* This only affects YUV formats. */
6819 		drm_plane_create_color_properties(
6820 			plane,
6821 			BIT(DRM_COLOR_YCBCR_BT601) |
6822 			BIT(DRM_COLOR_YCBCR_BT709) |
6823 			BIT(DRM_COLOR_YCBCR_BT2020),
6824 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6825 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6826 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6827 	}
6828 
6829 	supported_rotations =
6830 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6831 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6832 
6833 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6834 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6835 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6836 						   supported_rotations);
6837 
6838 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6839 
6840 	/* Create (reset) the plane state */
6841 	if (plane->funcs->reset)
6842 		plane->funcs->reset(plane);
6843 
6844 	return 0;
6845 }
6846 
6847 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6848 			       struct drm_plane *plane,
6849 			       uint32_t crtc_index)
6850 {
6851 	struct amdgpu_crtc *acrtc = NULL;
6852 	struct drm_plane *cursor_plane;
6853 
6854 	int res = -ENOMEM;
6855 
6856 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6857 	if (!cursor_plane)
6858 		goto fail;
6859 
6860 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6862 
6863 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6864 	if (!acrtc)
6865 		goto fail;
6866 
6867 	res = drm_crtc_init_with_planes(
6868 			dm->ddev,
6869 			&acrtc->base,
6870 			plane,
6871 			cursor_plane,
6872 			&amdgpu_dm_crtc_funcs, NULL);
6873 
6874 	if (res)
6875 		goto fail;
6876 
6877 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6878 
6879 	/* Create (reset) the plane state */
6880 	if (acrtc->base.funcs->reset)
6881 		acrtc->base.funcs->reset(&acrtc->base);
6882 
6883 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6884 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6885 
6886 	acrtc->crtc_id = crtc_index;
6887 	acrtc->base.enabled = false;
6888 	acrtc->otg_inst = -1;
6889 
6890 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6891 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6892 				   true, MAX_COLOR_LUT_ENTRIES);
6893 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6894 
6895 	return 0;
6896 
6897 fail:
6898 	kfree(acrtc);
6899 	kfree(cursor_plane);
6900 	return res;
6901 }
6902 
6904 static int to_drm_connector_type(enum signal_type st)
6905 {
6906 	switch (st) {
6907 	case SIGNAL_TYPE_HDMI_TYPE_A:
6908 		return DRM_MODE_CONNECTOR_HDMIA;
6909 	case SIGNAL_TYPE_EDP:
6910 		return DRM_MODE_CONNECTOR_eDP;
6911 	case SIGNAL_TYPE_LVDS:
6912 		return DRM_MODE_CONNECTOR_LVDS;
6913 	case SIGNAL_TYPE_RGB:
6914 		return DRM_MODE_CONNECTOR_VGA;
6915 	case SIGNAL_TYPE_DISPLAY_PORT:
6916 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6917 		return DRM_MODE_CONNECTOR_DisplayPort;
6918 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6919 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6920 		return DRM_MODE_CONNECTOR_DVID;
6921 	case SIGNAL_TYPE_VIRTUAL:
6922 		return DRM_MODE_CONNECTOR_VIRTUAL;
6923 
6924 	default:
6925 		return DRM_MODE_CONNECTOR_Unknown;
6926 	}
6927 }
6928 
6929 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6930 {
6931 	struct drm_encoder *encoder;
6932 
6933 	/* There is only one encoder per connector */
6934 	drm_connector_for_each_possible_encoder(connector, encoder)
6935 		return encoder;
6936 
6937 	return NULL;
6938 }
6939 
6940 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6941 {
6942 	struct drm_encoder *encoder;
6943 	struct amdgpu_encoder *amdgpu_encoder;
6944 
6945 	encoder = amdgpu_dm_connector_to_encoder(connector);
6946 
6947 	if (encoder == NULL)
6948 		return;
6949 
6950 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6951 
6952 	amdgpu_encoder->native_mode.clock = 0;
6953 
	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		/*
		 * The probed list was sorted by drm_mode_sort() in
		 * amdgpu_dm_connector_ddc_get_modes(), so only the first
		 * entry needs to be checked for the preferred flag here.
		 */
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
6967 }
6968 
6969 static struct drm_display_mode *
6970 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6971 			     char *name,
6972 			     int hdisplay, int vdisplay)
6973 {
6974 	struct drm_device *dev = encoder->dev;
6975 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6976 	struct drm_display_mode *mode = NULL;
6977 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6978 
6979 	mode = drm_mode_duplicate(dev, native_mode);
6980 
6981 	if (mode == NULL)
6982 		return NULL;
6983 
6984 	mode->hdisplay = hdisplay;
6985 	mode->vdisplay = vdisplay;
6986 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6987 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6988 
	return mode;
}
6992 
6993 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6994 						 struct drm_connector *connector)
6995 {
6996 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6997 	struct drm_display_mode *mode = NULL;
6998 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6999 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7000 				to_amdgpu_dm_connector(connector);
7001 	int i;
7002 	int n;
7003 	struct mode_size {
7004 		char name[DRM_DISPLAY_MODE_LEN];
7005 		int w;
7006 		int h;
7007 	} common_modes[] = {
7008 		{  "640x480",  640,  480},
7009 		{  "800x600",  800,  600},
7010 		{ "1024x768", 1024,  768},
7011 		{ "1280x720", 1280,  720},
7012 		{ "1280x800", 1280,  800},
7013 		{"1280x1024", 1280, 1024},
7014 		{ "1440x900", 1440,  900},
7015 		{"1680x1050", 1680, 1050},
7016 		{"1600x1200", 1600, 1200},
7017 		{"1920x1080", 1920, 1080},
7018 		{"1920x1200", 1920, 1200}
7019 	};
7020 
7021 	n = ARRAY_SIZE(common_modes);
7022 
7023 	for (i = 0; i < n; i++) {
7024 		struct drm_display_mode *curmode = NULL;
7025 		bool mode_existed = false;
7026 
7027 		if (common_modes[i].w > native_mode->hdisplay ||
7028 		    common_modes[i].h > native_mode->vdisplay ||
7029 		   (common_modes[i].w == native_mode->hdisplay &&
7030 		    common_modes[i].h == native_mode->vdisplay))
7031 			continue;
7032 
7033 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7034 			if (common_modes[i].w == curmode->hdisplay &&
7035 			    common_modes[i].h == curmode->vdisplay) {
7036 				mode_existed = true;
7037 				break;
7038 			}
7039 		}
7040 
7041 		if (mode_existed)
7042 			continue;
7043 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
7048 		amdgpu_dm_connector->num_modes++;
7049 	}
7050 }
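
/*
 * Worked example (editorial): for a native mode of 1920x1200, the loop
 * above skips 1920x1200 itself and anything wider or taller, then adds
 * each remaining common_modes[] entry (640x480 up to 1920x1080) that is
 * not already present in the connector's probed list.
 */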
7051 
7052 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7053 					      struct edid *edid)
7054 {
7055 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7056 			to_amdgpu_dm_connector(connector);
7057 
7058 	if (edid) {
7059 		/* empty probed_modes */
7060 		INIT_LIST_HEAD(&connector->probed_modes);
7061 		amdgpu_dm_connector->num_modes =
7062 				drm_add_edid_modes(connector, edid);
7063 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain
		 * more than one preferred mode, and a later entry in the
		 * probed list may be a higher preferred resolution: for
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 in a later DisplayID extension block.
		 */
7072 		drm_mode_sort(&connector->probed_modes);
7073 		amdgpu_dm_get_native_mode(connector);
7074 
7075 		/* Freesync capabilities are reset by calling
7076 		 * drm_add_edid_modes() and need to be
7077 		 * restored here.
7078 		 */
7079 		amdgpu_dm_update_freesync_caps(connector, edid);
7080 	} else {
7081 		amdgpu_dm_connector->num_modes = 0;
7082 	}
7083 }
7084 
7085 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7086 			      struct drm_display_mode *mode)
7087 {
7088 	struct drm_display_mode *m;
7089 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7091 		if (drm_mode_equal(m, mode))
7092 			return true;
7093 	}
7094 
7095 	return false;
7096 }
7097 
7098 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7099 {
7100 	const struct drm_display_mode *m;
7101 	struct drm_display_mode *new_mode;
7102 	uint i;
7103 	uint32_t new_modes_count = 0;
7104 
	/* Standard FPS values
	 *
	 * 23.976   - TV/NTSC
	 * 24       - Cinema
	 * 25       - TV/PAL
	 * 29.97    - TV/NTSC
	 * 30       - TV/NTSC
	 * 48       - Cinema HFR
	 * 50       - TV/PAL
	 * 60       - Commonly used
	 * 48,72,96 - Multiples of 24
	 */
7117 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7118 					 48000, 50000, 60000, 72000, 96000 };
7119 
7120 	/*
7121 	 * Find mode with highest refresh rate with the same resolution
7122 	 * as the preferred mode. Some monitors report a preferred mode
7123 	 * with lower resolution than the highest refresh rate supported.
7124 	 */
7125 
7126 	m = get_highest_refresh_rate_mode(aconnector, true);
7127 	if (!m)
7128 		return 0;
7129 
7130 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7131 		uint64_t target_vtotal, target_vtotal_diff;
7132 		uint64_t num, den;
7133 
7134 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7135 			continue;
7136 
7137 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7138 		    common_rates[i] > aconnector->max_vfreq * 1000)
7139 			continue;
7140 
7141 		num = (unsigned long long)m->clock * 1000 * 1000;
7142 		den = common_rates[i] * (unsigned long long)m->htotal;
7143 		target_vtotal = div_u64(num, den);
7144 		target_vtotal_diff = target_vtotal - m->vtotal;
7145 
7146 		/* Check for illegal modes */
7147 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7148 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7149 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7150 			continue;
7151 
7152 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7153 		if (!new_mode)
7154 			goto out;
7155 
7156 		new_mode->vtotal += (u16)target_vtotal_diff;
7157 		new_mode->vsync_start += (u16)target_vtotal_diff;
7158 		new_mode->vsync_end += (u16)target_vtotal_diff;
7159 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7160 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7161 
		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7167 	}
7168  out:
7169 	return new_modes_count;
7170 }
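
/*
 * Worked example for the vtotal retargeting above (editorial, illustrative
 * numbers): a 1920x1080@60 CEA timing has clock = 148500 kHz, htotal = 2200
 * and vtotal = 1125. Retargeting it to 50000 mHz (50 Hz) gives
 *
 *	target_vtotal = (148500 * 1000 * 1000) / (50000 * 2200) = 1350
 *
 * so target_vtotal_diff = 225 lines are added to vsync_start, vsync_end and
 * vtotal: the front porch is stretched while the pixel clock and htotal stay
 * fixed, which is how these fixed-refresh FreeSync video modes are derived.
 */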
7171 
7172 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7173 						   struct edid *edid)
7174 {
7175 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7176 		to_amdgpu_dm_connector(connector);
7177 
7178 	if (!(amdgpu_freesync_vid_mode && edid))
7179 		return;
7180 
7181 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7182 		amdgpu_dm_connector->num_modes +=
7183 			add_fs_modes(amdgpu_dm_connector);
7184 }
7185 
7186 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7187 {
7188 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7189 			to_amdgpu_dm_connector(connector);
7190 	struct drm_encoder *encoder;
7191 	struct edid *edid = amdgpu_dm_connector->edid;
7192 
7193 	encoder = amdgpu_dm_connector_to_encoder(connector);
7194 
7195 	if (!drm_edid_is_valid(edid)) {
7196 		amdgpu_dm_connector->num_modes =
7197 				drm_add_modes_noedid(connector, 640, 480);
7198 	} else {
7199 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7200 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7201 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7202 	}
7203 	amdgpu_dm_fbc_init(connector);
7204 
7205 	return amdgpu_dm_connector->num_modes;
7206 }
7207 
7208 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7209 				     struct amdgpu_dm_connector *aconnector,
7210 				     int connector_type,
7211 				     struct dc_link *link,
7212 				     int link_index)
7213 {
7214 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7215 
7216 	/*
7217 	 * Some of the properties below require access to state, like bpc.
7218 	 * Allocate some default initial connector state with our reset helper.
7219 	 */
7220 	if (aconnector->base.funcs->reset)
7221 		aconnector->base.funcs->reset(&aconnector->base);
7222 
7223 	aconnector->connector_id = link_index;
7224 	aconnector->dc_link = link;
7225 	aconnector->base.interlace_allowed = false;
7226 	aconnector->base.doublescan_allowed = false;
7227 	aconnector->base.stereo_allowed = false;
7228 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7229 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7230 	aconnector->audio_inst = -1;
7231 	mutex_init(&aconnector->hpd_lock);
7232 
	/*
	 * Configure HPD (hot plug detect) support. The connector->polled
	 * default value is 0, which means hot plug is not supported.
	 */
7237 	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
		break;
7248 	case DRM_MODE_CONNECTOR_DVID:
7249 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7250 		break;
7251 	default:
7252 		break;
7253 	}
7254 
7255 	drm_object_attach_property(&aconnector->base.base,
7256 				dm->ddev->mode_config.scaling_mode_property,
7257 				DRM_MODE_SCALE_NONE);
7258 
7259 	drm_object_attach_property(&aconnector->base.base,
7260 				adev->mode_info.underscan_property,
7261 				UNDERSCAN_OFF);
7262 	drm_object_attach_property(&aconnector->base.base,
7263 				adev->mode_info.underscan_hborder_property,
7264 				0);
7265 	drm_object_attach_property(&aconnector->base.base,
7266 				adev->mode_info.underscan_vborder_property,
7267 				0);
7268 
7269 	if (!aconnector->mst_port)
7270 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7271 
7272 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7273 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7274 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7275 
7276 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7277 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7278 		drm_object_attach_property(&aconnector->base.base,
7279 				adev->mode_info.abm_level_property, 0);
7280 	}
7281 
7282 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7283 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7284 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7285 		drm_object_attach_property(
7286 			&aconnector->base.base,
7287 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7288 
7289 		if (!aconnector->mst_port)
7290 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7291 
7292 #ifdef CONFIG_DRM_AMD_DC_HDCP
7293 		if (adev->dm.hdcp_workqueue)
7294 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7295 #endif
7296 	}
7297 }
7298 
7299 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7300 			      struct i2c_msg *msgs, int num)
7301 {
7302 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7303 	struct ddc_service *ddc_service = i2c->ddc_service;
7304 	struct i2c_command cmd;
7305 	int i;
7306 	int result = -EIO;
7307 
7308 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7309 
7310 	if (!cmd.payloads)
7311 		return result;
7312 
7313 	cmd.number_of_payloads = num;
7314 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7315 	cmd.speed = 100;
7316 
7317 	for (i = 0; i < num; i++) {
7318 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7319 		cmd.payloads[i].address = msgs[i].addr;
7320 		cmd.payloads[i].length = msgs[i].len;
7321 		cmd.payloads[i].data = msgs[i].buf;
7322 	}
7323 
7324 	if (dc_submit_i2c(
7325 			ddc_service->ctx->dc,
7326 			ddc_service->ddc_pin->hw_info.ddc_channel,
7327 			&cmd))
7328 		result = num;
7329 
7330 	kfree(cmd.payloads);
7331 	return result;
7332 }
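
/*
 * Illustrative sketch (editorial): a typical EDID read reaches the transfer
 * hook above as two i2c_msg entries, which are translated 1:1 into
 * i2c_payloads for dc_submit_i2c(), roughly:
 *
 *	u8 offset = 0, edid[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 *
 * The first message becomes a write payload (->write = true), the second a
 * read payload, and both are submitted as a single i2c_command.
 */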
7333 
7334 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7335 {
7336 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7337 }
7338 
7339 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7340 	.master_xfer = amdgpu_dm_i2c_xfer,
7341 	.functionality = amdgpu_dm_i2c_func,
7342 };
7343 
7344 static struct amdgpu_i2c_adapter *
7345 create_i2c(struct ddc_service *ddc_service,
7346 	   int link_index,
7347 	   int *res)
7348 {
7349 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7350 	struct amdgpu_i2c_adapter *i2c;
7351 
7352 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7353 	if (!i2c)
7354 		return NULL;
7355 	i2c->base.owner = THIS_MODULE;
7356 	i2c->base.class = I2C_CLASS_DDC;
7357 	i2c->base.dev.parent = &adev->pdev->dev;
7358 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7359 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7360 	i2c_set_adapdata(&i2c->base, i2c);
7361 	i2c->ddc_service = ddc_service;
7362 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7363 
7364 	return i2c;
7365 }
7366 
7368 /*
7369  * Note: this function assumes that dc_link_detect() was called for the
7370  * dc_link which will be represented by this aconnector.
7371  */
7372 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7373 				    struct amdgpu_dm_connector *aconnector,
7374 				    uint32_t link_index,
7375 				    struct amdgpu_encoder *aencoder)
7376 {
7377 	int res = 0;
7378 	int connector_type;
7379 	struct dc *dc = dm->dc;
7380 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7381 	struct amdgpu_i2c_adapter *i2c;
7382 
7383 	link->priv = aconnector;
7384 
7385 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7386 
7387 	i2c = create_i2c(link->ddc, link->link_index, &res);
7388 	if (!i2c) {
7389 		DRM_ERROR("Failed to create i2c adapter data\n");
7390 		return -ENOMEM;
7391 	}
7392 
7393 	aconnector->i2c = i2c;
7394 	res = i2c_add_adapter(&i2c->base);
7395 
7396 	if (res) {
7397 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7398 		goto out_free;
7399 	}
7400 
7401 	connector_type = to_drm_connector_type(link->connector_signal);
7402 
7403 	res = drm_connector_init_with_ddc(
7404 			dm->ddev,
7405 			&aconnector->base,
7406 			&amdgpu_dm_connector_funcs,
7407 			connector_type,
7408 			&i2c->base);
7409 
7410 	if (res) {
7411 		DRM_ERROR("connector_init failed\n");
7412 		aconnector->connector_id = -1;
7413 		goto out_free;
7414 	}
7415 
7416 	drm_connector_helper_add(
7417 			&aconnector->base,
7418 			&amdgpu_dm_connector_helper_funcs);
7419 
7420 	amdgpu_dm_connector_init_helper(
7421 		dm,
7422 		aconnector,
7423 		connector_type,
7424 		link,
7425 		link_index);
7426 
7427 	drm_connector_attach_encoder(
7428 		&aconnector->base, &aencoder->base);
7429 
7430 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7431 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7432 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7433 
7434 out_free:
7435 	if (res) {
7436 		kfree(i2c);
7437 		aconnector->i2c = NULL;
7438 	}
7439 	return res;
7440 }
7441 
7442 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7443 {
7444 	switch (adev->mode_info.num_crtc) {
7445 	case 1:
7446 		return 0x1;
7447 	case 2:
7448 		return 0x3;
7449 	case 3:
7450 		return 0x7;
7451 	case 4:
7452 		return 0xf;
7453 	case 5:
7454 		return 0x1f;
7455 	case 6:
7456 	default:
7457 		return 0x3f;
7458 	}
7459 }
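
/*
 * Editorial note: for the listed 1-6 CRTC configurations the switch above is
 * equivalent to the contiguous bitmask
 *
 *	(1 << adev->mode_info.num_crtc) - 1
 *
 * capped at six bits (0x3f); the explicit cases just make the supported
 * range obvious.
 */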
7460 
7461 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7462 				  struct amdgpu_encoder *aencoder,
7463 				  uint32_t link_index)
7464 {
7465 	struct amdgpu_device *adev = drm_to_adev(dev);
7466 
7467 	int res = drm_encoder_init(dev,
7468 				   &aencoder->base,
7469 				   &amdgpu_dm_encoder_funcs,
7470 				   DRM_MODE_ENCODER_TMDS,
7471 				   NULL);
7472 
7473 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7474 
7475 	if (!res)
7476 		aencoder->encoder_id = link_index;
7477 	else
7478 		aencoder->encoder_id = -1;
7479 
7480 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7481 
7482 	return res;
7483 }
7484 
7485 static void manage_dm_interrupts(struct amdgpu_device *adev,
7486 				 struct amdgpu_crtc *acrtc,
7487 				 bool enable)
7488 {
7489 	/*
7490 	 * We have no guarantee that the frontend index maps to the same
7491 	 * backend index - some even map to more than one.
7492 	 *
7493 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7494 	 */
7495 	int irq_type =
7496 		amdgpu_display_crtc_idx_to_irq_type(
7497 			adev,
7498 			acrtc->crtc_id);
7499 
7500 	if (enable) {
7501 		drm_crtc_vblank_on(&acrtc->base);
7502 		amdgpu_irq_get(
7503 			adev,
7504 			&adev->pageflip_irq,
7505 			irq_type);
7506 	} else {
7507 
7508 		amdgpu_irq_put(
7509 			adev,
7510 			&adev->pageflip_irq,
7511 			irq_type);
7512 		drm_crtc_vblank_off(&acrtc->base);
7513 	}
7514 }
7515 
7516 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7517 				      struct amdgpu_crtc *acrtc)
7518 {
7519 	int irq_type =
7520 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7521 
	/*
	 * This reads the current state for the IRQ and force-reapplies
	 * the setting to hardware.
	 */
7526 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7527 }
7528 
7529 static bool
7530 is_scaling_state_different(const struct dm_connector_state *dm_state,
7531 			   const struct dm_connector_state *old_dm_state)
7532 {
7533 	if (dm_state->scaling != old_dm_state->scaling)
7534 		return true;
7535 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7536 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7537 			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7539 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7540 			return true;
7541 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7542 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7543 		return true;
7544 	return false;
7545 }
7546 
7547 #ifdef CONFIG_DRM_AMD_DC_HDCP
7548 static bool is_content_protection_different(struct drm_connector_state *state,
7549 					    const struct drm_connector_state *old_state,
7550 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7551 {
7552 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7553 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7554 
7555 	/* Handle: Type0/1 change */
7556 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7557 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7558 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7559 		return true;
7560 	}
7561 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
7566 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7567 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7568 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7569 		return false;
7570 	}
7571 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
7576 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7577 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7578 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7579 
	/* Check that something is connected and enabled; otherwise we would
	 * start HDCP with nothing attached (hot-plug, headless S3, DPMS).
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
7585 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7586 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7587 		dm_con_state->update_hdcp = false;
7588 		return true;
7589 	}
7590 
7591 	/*
7592 	 * Handles:	UNDESIRED -> UNDESIRED
7593 	 *		DESIRED -> DESIRED
7594 	 *		ENABLED -> ENABLED
7595 	 */
7596 	if (old_state->content_protection == state->content_protection)
7597 		return false;
7598 
7599 	/*
7600 	 * Handles:	UNDESIRED -> DESIRED
7601 	 *		DESIRED -> UNDESIRED
7602 	 *		ENABLED -> UNDESIRED
7603 	 */
7604 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7605 		return true;
7606 
7607 	/*
7608 	 * Handles:	DESIRED -> ENABLED
7609 	 */
7610 	return false;
7611 }
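
/*
 * Editorial summary of the transitions handled above:
 *
 *	old CP state	new CP state	result
 *	------------	------------	------
 *	ENABLED		DESIRED		false (CP merely being re-enabled)
 *	UNDESIRED	ENABLED		demoted to DESIRED first (S3 resume)
 *	DESIRED		DESIRED		true only for the update_hdcp case
 *	X		X (unchanged)	false
 *	any		not ENABLED	true (enable/disable request)
 *	DESIRED		ENABLED		false (transition is hardware-driven)
 *
 * A Type0/Type1 hdcp_content_type change additionally forces DESIRED and
 * returns true.
 */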
7612 
7613 #endif
7614 static void remove_stream(struct amdgpu_device *adev,
7615 			  struct amdgpu_crtc *acrtc,
7616 			  struct dc_stream_state *stream)
7617 {
	/* This is the update/mode-set case. */
	acrtc->otg_inst = -1;
7621 	acrtc->enabled = false;
7622 }
7623 
7624 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7625 			       struct dc_cursor_position *position)
7626 {
7627 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7628 	int x, y;
7629 	int xorigin = 0, yorigin = 0;
7630 
7631 	position->enable = false;
7632 	position->x = 0;
7633 	position->y = 0;
7634 
7635 	if (!crtc || !plane->state->fb)
7636 		return 0;
7637 
7638 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7639 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7640 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7641 			  __func__,
7642 			  plane->state->crtc_w,
7643 			  plane->state->crtc_h);
7644 		return -EINVAL;
7645 	}
7646 
7647 	x = plane->state->crtc_x;
7648 	y = plane->state->crtc_y;
7649 
7650 	if (x <= -amdgpu_crtc->max_cursor_width ||
7651 	    y <= -amdgpu_crtc->max_cursor_height)
7652 		return 0;
7653 
7654 	if (x < 0) {
7655 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7656 		x = 0;
7657 	}
7658 	if (y < 0) {
7659 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7660 		y = 0;
7661 	}
7662 	position->enable = true;
7663 	position->translate_by_source = true;
7664 	position->x = x;
7665 	position->y = y;
7666 	position->x_hotspot = xorigin;
7667 	position->y_hotspot = yorigin;
7668 
7669 	return 0;
7670 }
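
/*
 * Worked example for the clamping above (editorial): a 64x64 cursor at
 * crtc_x = -10, crtc_y = 20 yields position.x = 0, x_hotspot = 10,
 * position.y = 20, y_hotspot = 0, so DC scans out the cursor image shifted
 * ten pixels left while the programmed position stays on-screen.
 */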
7671 
7672 static void handle_cursor_update(struct drm_plane *plane,
7673 				 struct drm_plane_state *old_plane_state)
7674 {
7675 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7676 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7677 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7678 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7679 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7680 	uint64_t address = afb ? afb->address : 0;
7681 	struct dc_cursor_position position;
7682 	struct dc_cursor_attributes attributes;
7683 	int ret;
7684 
7685 	if (!plane->state->fb && !old_plane_state->fb)
7686 		return;
7687 
	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
7689 			 __func__,
7690 			 amdgpu_crtc->crtc_id,
7691 			 plane->state->crtc_w,
7692 			 plane->state->crtc_h);
7693 
7694 	ret = get_cursor_position(plane, crtc, &position);
7695 	if (ret)
7696 		return;
7697 
7698 	if (!position.enable) {
7699 		/* turn off cursor */
7700 		if (crtc_state && crtc_state->stream) {
7701 			mutex_lock(&adev->dm.dc_lock);
7702 			dc_stream_set_cursor_position(crtc_state->stream,
7703 						      &position);
7704 			mutex_unlock(&adev->dm.dc_lock);
7705 		}
7706 		return;
7707 	}
7708 
7709 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7710 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7711 
7712 	memset(&attributes, 0, sizeof(attributes));
7713 	attributes.address.high_part = upper_32_bits(address);
7714 	attributes.address.low_part  = lower_32_bits(address);
7715 	attributes.width             = plane->state->crtc_w;
7716 	attributes.height            = plane->state->crtc_h;
7717 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7718 	attributes.rotation_angle    = 0;
7719 	attributes.attribute_flags.value = 0;
7720 
7721 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7722 
7723 	if (crtc_state->stream) {
7724 		mutex_lock(&adev->dm.dc_lock);
7725 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7726 							 &attributes))
7727 			DRM_ERROR("DC failed to set cursor attributes\n");
7728 
7729 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7730 						   &position))
7731 			DRM_ERROR("DC failed to set cursor position\n");
7732 		mutex_unlock(&adev->dm.dc_lock);
7733 	}
7734 }
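
/*
 * Editorial note on the pitch computed above: DC expects the cursor pitch in
 * pixels while drm_framebuffer stores it in bytes, so for a 64-pixel-wide
 * ARGB8888 cursor FB with pitches[0] = 256 and cpp[0] = 4 the result is
 * attributes.pitch = 64.
 */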
7735 
7736 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7737 {
7739 	assert_spin_locked(&acrtc->base.dev->event_lock);
7740 	WARN_ON(acrtc->event);
7741 
7742 	acrtc->event = acrtc->base.state->event;
7743 
7744 	/* Set the flip status */
7745 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7746 
7747 	/* Mark this event as consumed */
7748 	acrtc->base.state->event = NULL;
7749 
7750 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7751 						 acrtc->crtc_id);
7752 }
7753 
7754 static void update_freesync_state_on_stream(
7755 	struct amdgpu_display_manager *dm,
7756 	struct dm_crtc_state *new_crtc_state,
7757 	struct dc_stream_state *new_stream,
7758 	struct dc_plane_state *surface,
7759 	u32 flip_timestamp_in_us)
7760 {
7761 	struct mod_vrr_params vrr_params;
7762 	struct dc_info_packet vrr_infopacket = {0};
7763 	struct amdgpu_device *adev = dm->adev;
7764 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7765 	unsigned long flags;
7766 	bool pack_sdp_v1_3 = false;
7767 
7768 	if (!new_stream)
7769 		return;
7770 
7771 	/*
7772 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7773 	 * For now it's sufficient to just guard against these conditions.
7774 	 */
7775 
7776 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7777 		return;
7778 
7779 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7781 
7782 	if (surface) {
7783 		mod_freesync_handle_preflip(
7784 			dm->freesync_module,
7785 			surface,
7786 			new_stream,
7787 			flip_timestamp_in_us,
7788 			&vrr_params);
7789 
7790 		if (adev->family < AMDGPU_FAMILY_AI &&
7791 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7792 			mod_freesync_handle_v_update(dm->freesync_module,
7793 						     new_stream, &vrr_params);
7794 
7795 			/* Need to call this before the frame ends. */
7796 			dc_stream_adjust_vmin_vmax(dm->dc,
7797 						   new_crtc_state->stream,
7798 						   &vrr_params.adjust);
7799 		}
7800 	}
7801 
7802 	mod_freesync_build_vrr_infopacket(
7803 		dm->freesync_module,
7804 		new_stream,
7805 		&vrr_params,
7806 		PACKET_TYPE_VRR,
7807 		TRANSFER_FUNC_UNKNOWN,
7808 		&vrr_infopacket,
7809 		pack_sdp_v1_3);
7810 
7811 	new_crtc_state->freesync_timing_changed |=
7812 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7813 			&vrr_params.adjust,
7814 			sizeof(vrr_params.adjust)) != 0);
7815 
7816 	new_crtc_state->freesync_vrr_info_changed |=
7817 		(memcmp(&new_crtc_state->vrr_infopacket,
7818 			&vrr_infopacket,
7819 			sizeof(vrr_infopacket)) != 0);
7820 
7821 	acrtc->dm_irq_params.vrr_params = vrr_params;
7822 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7823 
7824 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7825 	new_stream->vrr_infopacket = vrr_infopacket;
7826 
7827 	if (new_crtc_state->freesync_vrr_info_changed)
7828 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7829 			      new_crtc_state->base.crtc->base.id,
7830 			      (int)new_crtc_state->base.vrr_enabled,
7831 			      (int)vrr_params.state);
7832 
7833 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7834 }
7835 
7836 static void update_stream_irq_parameters(
7837 	struct amdgpu_display_manager *dm,
7838 	struct dm_crtc_state *new_crtc_state)
7839 {
7840 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7841 	struct mod_vrr_params vrr_params;
7842 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7843 	struct amdgpu_device *adev = dm->adev;
7844 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7845 	unsigned long flags;
7846 
7847 	if (!new_stream)
7848 		return;
7849 
7850 	/*
7851 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7852 	 * For now it's sufficient to just guard against these conditions.
7853 	 */
7854 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7855 		return;
7856 
7857 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7858 	vrr_params = acrtc->dm_irq_params.vrr_params;
7859 
7860 	if (new_crtc_state->vrr_supported &&
7861 	    config.min_refresh_in_uhz &&
7862 	    config.max_refresh_in_uhz) {
		/*
		 * If a FreeSync-compatible mode was set, config.state will
		 * already have been set in atomic check.
		 */
7867 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7868 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7869 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7870 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7871 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7872 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7873 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7874 		} else {
7875 			config.state = new_crtc_state->base.vrr_enabled ?
7876 						     VRR_STATE_ACTIVE_VARIABLE :
7877 						     VRR_STATE_INACTIVE;
7878 		}
7879 	} else {
7880 		config.state = VRR_STATE_UNSUPPORTED;
7881 	}
7882 
7883 	mod_freesync_build_vrr_params(dm->freesync_module,
7884 				      new_stream,
7885 				      &config, &vrr_params);
7886 
7887 	new_crtc_state->freesync_timing_changed |=
7888 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7889 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7890 
7891 	new_crtc_state->freesync_config = config;
7892 	/* Copy state for access from DM IRQ handler */
7893 	acrtc->dm_irq_params.freesync_config = config;
7894 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7895 	acrtc->dm_irq_params.vrr_params = vrr_params;
7896 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7897 }
7898 
7899 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7900 					    struct dm_crtc_state *new_state)
7901 {
7902 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7903 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7904 
	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after a disable would compute bogus vblank/pflip
		 * timestamps if the disable happened inside the display
		 * front-porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
7914 		dm_set_vupdate_irq(new_state->base.crtc, true);
7915 		drm_crtc_vblank_get(new_state->base.crtc);
7916 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7917 				 __func__, new_state->base.crtc->base.id);
7918 	} else if (old_vrr_active && !new_vrr_active) {
7919 		/* Transition VRR active -> inactive:
7920 		 * Allow vblank irq disable again for fixed refresh rate.
7921 		 */
7922 		dm_set_vupdate_irq(new_state->base.crtc, false);
7923 		drm_crtc_vblank_put(new_state->base.crtc);
7924 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7925 				 __func__, new_state->base.crtc->base.id);
7926 	}
7927 }
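
/*
 * Editorial note: the drm_crtc_vblank_get()/drm_crtc_vblank_put() calls
 * above are strictly paired across the off->on and on->off transitions, so
 * the extra vblank reference is held exactly while VRR is active and the DRM
 * core can never disable the vblank irq mid-VRR.
 */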
7928 
7929 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7930 {
7931 	struct drm_plane *plane;
7932 	struct drm_plane_state *old_plane_state, *new_plane_state;
7933 	int i;
7934 
7935 	/*
7936 	 * TODO: Make this per-stream so we don't issue redundant updates for
7937 	 * commits with multiple streams.
7938 	 */
7939 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7940 				       new_plane_state, i)
7941 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7942 			handle_cursor_update(plane, old_plane_state);
7943 }
7944 
7945 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7946 				    struct dc_state *dc_state,
7947 				    struct drm_device *dev,
7948 				    struct amdgpu_display_manager *dm,
7949 				    struct drm_crtc *pcrtc,
7950 				    bool wait_for_vblank)
7951 {
7952 	uint32_t i;
7953 	uint64_t timestamp_ns;
7954 	struct drm_plane *plane;
7955 	struct drm_plane_state *old_plane_state, *new_plane_state;
7956 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7957 	struct drm_crtc_state *new_pcrtc_state =
7958 			drm_atomic_get_new_crtc_state(state, pcrtc);
7959 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7960 	struct dm_crtc_state *dm_old_crtc_state =
7961 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7962 	int planes_count = 0, vpos, hpos;
7963 	long r;
7964 	unsigned long flags;
7965 	struct amdgpu_bo *abo;
7966 	uint32_t target_vblank, last_flip_vblank;
7967 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7968 	bool pflip_present = false;
7969 	struct {
7970 		struct dc_surface_update surface_updates[MAX_SURFACES];
7971 		struct dc_plane_info plane_infos[MAX_SURFACES];
7972 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7973 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7974 		struct dc_stream_update stream_update;
7975 	} *bundle;
7976 
7977 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7978 
7979 	if (!bundle) {
7980 		dm_error("Failed to allocate update bundle\n");
7981 		goto cleanup;
7982 	}
7983 
7984 	/*
7985 	 * Disable the cursor first if we're disabling all the planes.
7986 	 * It'll remain on the screen after the planes are re-enabled
7987 	 * if we don't.
7988 	 */
7989 	if (acrtc_state->active_planes == 0)
7990 		amdgpu_dm_commit_cursors(state);
7991 
7992 	/* update planes when needed */
7993 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7994 		struct drm_crtc *crtc = new_plane_state->crtc;
7995 		struct drm_crtc_state *new_crtc_state;
7996 		struct drm_framebuffer *fb = new_plane_state->fb;
7997 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7998 		bool plane_needs_flip;
7999 		struct dc_plane_state *dc_plane;
8000 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8001 
8002 		/* Cursor plane is handled after stream updates */
8003 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8004 			continue;
8005 
8006 		if (!fb || !crtc || pcrtc != crtc)
8007 			continue;
8008 
8009 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8010 		if (!new_crtc_state->active)
8011 			continue;
8012 
8013 		dc_plane = dm_new_plane_state->dc_state;
8014 
8015 		bundle->surface_updates[planes_count].surface = dc_plane;
8016 		if (new_pcrtc_state->color_mgmt_changed) {
8017 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8018 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8019 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8020 		}
8021 
8022 		fill_dc_scaling_info(new_plane_state,
8023 				     &bundle->scaling_infos[planes_count]);
8024 
8025 		bundle->surface_updates[planes_count].scaling_info =
8026 			&bundle->scaling_infos[planes_count];
8027 
8028 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8029 
8030 		pflip_present = pflip_present || plane_needs_flip;
8031 
8032 		if (!plane_needs_flip) {
8033 			planes_count += 1;
8034 			continue;
8035 		}
8036 
8037 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8038 
8039 		/*
8040 		 * Wait for all fences on this FB. Do limited wait to avoid
8041 		 * deadlock during GPU reset when this fence will not signal
8042 		 * but we hold reservation lock for the BO.
8043 		 */
8044 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8045 							false,
8046 							msecs_to_jiffies(5000));
8047 		if (unlikely(r <= 0))
8048 			DRM_ERROR("Waiting for fences timed out!");
8049 
8050 		fill_dc_plane_info_and_addr(
8051 			dm->adev, new_plane_state,
8052 			afb->tiling_flags,
8053 			&bundle->plane_infos[planes_count],
8054 			&bundle->flip_addrs[planes_count].address,
8055 			afb->tmz_surface, false);
8056 
8057 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8058 				 new_plane_state->plane->index,
8059 				 bundle->plane_infos[planes_count].dcc.enable);
8060 
8061 		bundle->surface_updates[planes_count].plane_info =
8062 			&bundle->plane_infos[planes_count];
8063 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
8068 		bundle->flip_addrs[planes_count].flip_immediate =
8069 			crtc->state->async_flip &&
8070 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8071 
8072 		timestamp_ns = ktime_get_ns();
8073 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8074 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8075 		bundle->surface_updates[planes_count].surface = dc_plane;
8076 
8077 		if (!bundle->surface_updates[planes_count].surface) {
8078 			DRM_ERROR("No surface for CRTC: id=%d\n",
8079 					acrtc_attach->crtc_id);
8080 			continue;
8081 		}
8082 
8083 		if (plane == pcrtc->primary)
8084 			update_freesync_state_on_stream(
8085 				dm,
8086 				acrtc_state,
8087 				acrtc_state->stream,
8088 				dc_plane,
8089 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8090 
8091 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8092 				 __func__,
8093 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8094 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8095 
8096 		planes_count += 1;
8097 
8098 	}
8099 
8100 	if (pflip_present) {
8101 		if (!vrr_active) {
8102 			/* Use old throttling in non-vrr fixed refresh rate mode
8103 			 * to keep flip scheduling based on target vblank counts
8104 			 * working in a backwards compatible way, e.g., for
8105 			 * clients using the GLX_OML_sync_control extension or
8106 			 * DRI3/Present extension with defined target_msc.
8107 			 */
8108 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
8111 			/* For variable refresh rate mode only:
8112 			 * Get vblank of last completed flip to avoid > 1 vrr
8113 			 * flips per video frame by use of throttling, but allow
8114 			 * flip programming anywhere in the possibly large
8115 			 * variable vrr vblank interval for fine-grained flip
8116 			 * timing control and more opportunity to avoid stutter
8117 			 * on late submission of flips.
8118 			 */
8119 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8120 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8121 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8122 		}
8123 
8124 		target_vblank = last_flip_vblank + wait_for_vblank;
8125 
8126 		/*
8127 		 * Wait until we're out of the vertical blank period before the one
8128 		 * targeted by the flip
8129 		 */
8130 		while ((acrtc_attach->enabled &&
8131 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8132 							    0, &vpos, &hpos, NULL,
8133 							    NULL, &pcrtc->hwmode)
8134 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8135 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8136 			(int)(target_vblank -
8137 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8138 			usleep_range(1000, 1100);
8139 		}
8140 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on
		 * the appropriate hardware blocks (e.g. HUBP), so in the
		 * transition from 0 -> n planes we have to skip a hardware
		 * generated event and rely on sending it from software.
		 */
8149 		if (acrtc_attach->base.state->event &&
8150 		    acrtc_state->active_planes > 0) {
8151 			drm_crtc_vblank_get(pcrtc);
8152 
8153 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8154 
8155 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8156 			prepare_flip_isr(acrtc_attach);
8157 
8158 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8159 		}
8160 
8161 		if (acrtc_state->stream) {
8162 			if (acrtc_state->freesync_vrr_info_changed)
8163 				bundle->stream_update.vrr_infopacket =
8164 					&acrtc_state->stream->vrr_infopacket;
8165 		}
8166 	}
8167 
8168 	/* Update the planes if changed or disable if we don't have any. */
8169 	if ((planes_count || acrtc_state->active_planes == 0) &&
8170 		acrtc_state->stream) {
8171 		bundle->stream_update.stream = acrtc_state->stream;
8172 		if (new_pcrtc_state->mode_changed) {
8173 			bundle->stream_update.src = acrtc_state->stream->src;
8174 			bundle->stream_update.dst = acrtc_state->stream->dst;
8175 		}
8176 
8177 		if (new_pcrtc_state->color_mgmt_changed) {
8178 			/*
8179 			 * TODO: This isn't fully correct since we've actually
8180 			 * already modified the stream in place.
8181 			 */
8182 			bundle->stream_update.gamut_remap =
8183 				&acrtc_state->stream->gamut_remap_matrix;
8184 			bundle->stream_update.output_csc_transform =
8185 				&acrtc_state->stream->csc_color_matrix;
8186 			bundle->stream_update.out_transfer_func =
8187 				acrtc_state->stream->out_transfer_func;
8188 		}
8189 
8190 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8191 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8192 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8193 
8194 		/*
8195 		 * If FreeSync state on the stream has changed then we need to
8196 		 * re-adjust the min/max bounds now that DC doesn't handle this
8197 		 * as part of commit.
8198 		 */
8199 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8200 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8201 			dc_stream_adjust_vmin_vmax(
8202 				dm->dc, acrtc_state->stream,
8203 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8204 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8205 		}
8206 		mutex_lock(&dm->dc_lock);
8207 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8208 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8209 			amdgpu_dm_psr_disable(acrtc_state->stream);
8210 
8211 		dc_commit_updates_for_stream(dm->dc,
8212 						     bundle->surface_updates,
8213 						     planes_count,
8214 						     acrtc_state->stream,
8215 						     &bundle->stream_update,
8216 						     dc_state);
8217 
		/*
8219 		 * Enable or disable the interrupts on the backend.
8220 		 *
8221 		 * Most pipes are put into power gating when unused.
8222 		 *
8223 		 * When power gating is enabled on a pipe we lose the
8224 		 * interrupt enablement state when power gating is disabled.
8225 		 *
8226 		 * So we need to update the IRQ control state in hardware
8227 		 * whenever the pipe turns on (since it could be previously
8228 		 * power gated) or off (since some pipes can't be power gated
8229 		 * on some ASICs).
8230 		 */
8231 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8232 			dm_update_pflip_irq_state(drm_to_adev(dev),
8233 						  acrtc_attach);
8234 
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		} else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			   acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
			   !acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}
8244 
8245 		mutex_unlock(&dm->dc_lock);
8246 	}
8247 
8248 	/*
8249 	 * Update cursor state *after* programming all the planes.
8250 	 * This avoids redundant programming in the case where we're going
8251 	 * to be disabling a single plane - those pipes are being disabled.
8252 	 */
8253 	if (acrtc_state->active_planes)
8254 		amdgpu_dm_commit_cursors(state);
8255 
8256 cleanup:
8257 	kfree(bundle);
8258 }
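
/*
 * Editorial design note: the update bundle in amdgpu_dm_commit_planes() is
 * heap-allocated rather than kept on the stack, presumably because
 * MAX_SURFACES copies of the surface/plane/scaling/flip structs would make
 * the frame of this already-deep commit path very large; one kzalloc()/
 * kfree() pair per commit keeps stack usage bounded.
 */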
8259 
8260 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8261 				   struct drm_atomic_state *state)
8262 {
8263 	struct amdgpu_device *adev = drm_to_adev(dev);
8264 	struct amdgpu_dm_connector *aconnector;
8265 	struct drm_connector *connector;
8266 	struct drm_connector_state *old_con_state, *new_con_state;
8267 	struct drm_crtc_state *new_crtc_state;
8268 	struct dm_crtc_state *new_dm_crtc_state;
8269 	const struct dc_stream_status *status;
8270 	int i, inst;
8271 
8272 	/* Notify device removals. */
8273 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8274 		if (old_con_state->crtc != new_con_state->crtc) {
8275 			/* CRTC changes require notification. */
8276 			goto notify;
8277 		}
8278 
8279 		if (!new_con_state->crtc)
8280 			continue;
8281 
8282 		new_crtc_state = drm_atomic_get_new_crtc_state(
8283 			state, new_con_state->crtc);
8284 
8285 		if (!new_crtc_state)
8286 			continue;
8287 
8288 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8289 			continue;
8290 
8291 	notify:
8292 		aconnector = to_amdgpu_dm_connector(connector);
8293 
8294 		mutex_lock(&adev->dm.audio_lock);
8295 		inst = aconnector->audio_inst;
8296 		aconnector->audio_inst = -1;
8297 		mutex_unlock(&adev->dm.audio_lock);
8298 
8299 		amdgpu_dm_audio_eld_notify(adev, inst);
8300 	}
8301 
8302 	/* Notify audio device additions. */
8303 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8304 		if (!new_con_state->crtc)
8305 			continue;
8306 
8307 		new_crtc_state = drm_atomic_get_new_crtc_state(
8308 			state, new_con_state->crtc);
8309 
8310 		if (!new_crtc_state)
8311 			continue;
8312 
8313 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8314 			continue;
8315 
8316 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8317 		if (!new_dm_crtc_state->stream)
8318 			continue;
8319 
8320 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8321 		if (!status)
8322 			continue;
8323 
8324 		aconnector = to_amdgpu_dm_connector(connector);
8325 
8326 		mutex_lock(&adev->dm.audio_lock);
8327 		inst = status->audio_inst;
8328 		aconnector->audio_inst = inst;
8329 		mutex_unlock(&adev->dm.audio_lock);
8330 
8331 		amdgpu_dm_audio_eld_notify(adev, inst);
8332 	}
8333 }
8334 
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags into sync with a drm_crtc_state's flags.
 */
8343 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8344 						struct dc_stream_state *stream_state)
8345 {
8346 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8347 }
8348 
/**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
8357 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8358 {
8359 	struct drm_device *dev = state->dev;
8360 	struct amdgpu_device *adev = drm_to_adev(dev);
8361 	struct amdgpu_display_manager *dm = &adev->dm;
8362 	struct dm_atomic_state *dm_state;
8363 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8364 	uint32_t i, j;
8365 	struct drm_crtc *crtc;
8366 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8367 	unsigned long flags;
8368 	bool wait_for_vblank = true;
8369 	struct drm_connector *connector;
8370 	struct drm_connector_state *old_con_state, *new_con_state;
8371 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8372 	int crtc_disable_count = 0;
8373 	bool mode_set_reset_required = false;
8374 
8375 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8376 
8377 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8378 
8379 	dm_state = dm_atomic_get_new_state(state);
8380 	if (dm_state && dm_state->context) {
8381 		dc_state = dm_state->context;
8382 	} else {
8383 		/* No state changes, retain current state. */
8384 		dc_state_temp = dc_create_state(dm->dc);
8385 		ASSERT(dc_state_temp);
8386 		dc_state = dc_state_temp;
8387 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8388 	}
8389 
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
8392 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8393 
8394 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8395 
8396 		if (old_crtc_state->active &&
8397 		    (!new_crtc_state->active ||
8398 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8399 			manage_dm_interrupts(adev, acrtc, false);
8400 			dc_stream_release(dm_old_crtc_state->stream);
8401 		}
8402 	}
8403 
8404 	drm_atomic_helper_calc_timestamping_constants(state);
8405 
8406 	/* update changed items */
8407 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8408 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8409 
8410 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8411 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8412 
		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
8417 			acrtc->crtc_id,
8418 			new_crtc_state->enable,
8419 			new_crtc_state->active,
8420 			new_crtc_state->planes_changed,
8421 			new_crtc_state->mode_changed,
8422 			new_crtc_state->active_changed,
8423 			new_crtc_state->connectors_changed);
8424 
8425 		/* Disable cursor if disabling crtc */
8426 		if (old_crtc_state->active && !new_crtc_state->active) {
8427 			struct dc_cursor_position position;
8428 
8429 			memset(&position, 0, sizeof(position));
8430 			mutex_lock(&dm->dc_lock);
8431 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8432 			mutex_unlock(&dm->dc_lock);
8433 		}
8434 
8435 		/* Copy all transient state flags into dc state */
8436 		if (dm_new_crtc_state->stream) {
8437 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8438 							    dm_new_crtc_state->stream);
8439 		}
8440 
		/* Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
8443 		 */
8444 
8445 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8447 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8448 
8449 			if (!dm_new_crtc_state->stream) {
8450 				/*
				 * This could happen because of issues with the
				 * delivery of userspace notifications.
				 * In this case userspace tries to set a mode on
				 * a display that is in fact disconnected;
				 * dc_sink is NULL on the aconnector then.
				 * We expect a mode reset to follow shortly.
				 *
				 * This can also happen when an unplug occurs
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state stays consistent with the sw state.
8464 				 */
8465 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8466 						__func__, acrtc->base.base.id);
8467 				continue;
8468 			}
8469 
8470 			if (dm_old_crtc_state->stream)
8471 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8472 
8473 			pm_runtime_get_noresume(dev->dev);
8474 
8475 			acrtc->enabled = true;
8476 			acrtc->hw_mode = new_crtc_state->mode;
8477 			crtc->hwmode = new_crtc_state->mode;
8478 			mode_set_reset_required = true;
8479 		} else if (modereset_required(new_crtc_state)) {
8480 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8481 			/* i.e. reset mode */
8482 			if (dm_old_crtc_state->stream)
8483 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8484 
8485 			mode_set_reset_required = true;
8486 		}
8487 	} /* for_each_crtc_in_state() */
8488 
8489 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
8491 		if (mode_set_reset_required)
8492 			amdgpu_dm_psr_disable_all(dm);
8493 
8494 		dm_enable_per_frame_crtc_master_sync(dc_state);
8495 		mutex_lock(&dm->dc_lock);
8496 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8497 		mutex_unlock(&dm->dc_lock);
8498 	}
8499 
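	/*
	 * Cache the OTG (timing generator) instance DC assigned to each
	 * enabled stream, so that interrupt handling can later map an OTG
	 * back to its CRTC.
	 */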
8500 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8501 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8502 
8503 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8504 
8505 		if (dm_new_crtc_state->stream != NULL) {
8506 			const struct dc_stream_status *status =
8507 					dc_stream_get_status(dm_new_crtc_state->stream);
8508 
8509 			if (!status)
8510 				status = dc_stream_get_status_from_state(dc_state,
8511 									 dm_new_crtc_state->stream);
8512 			if (!status)
8513 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8514 			else
8515 				acrtc->otg_inst = status->primary_otg_inst;
8516 		}
8517 	}
8518 #ifdef CONFIG_DRM_AMD_DC_HDCP
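	/*
	 * Walk the connectors to tear down or (re)enable HDCP according to
	 * the new content protection state of each display.
	 */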
8519 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8520 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8521 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8522 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8523 
8524 		new_crtc_state = NULL;
8525 
8526 		if (acrtc)
8527 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8528 
8529 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8530 
8531 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8532 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8533 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8534 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8535 			dm_new_con_state->update_hdcp = true;
8536 			continue;
8537 		}
8538 
8539 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8540 			hdcp_update_display(
8541 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8542 				new_con_state->hdcp_content_type,
8543 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8544 	}
8545 #endif
8546 
8547 	/* Handle connector state changes */
8548 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8549 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8550 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8551 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8552 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8553 		struct dc_stream_update stream_update;
8554 		struct dc_info_packet hdr_packet;
8555 		struct dc_stream_status *status = NULL;
8556 		bool abm_changed, hdr_changed, scaling_changed;
8557 
8558 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8559 		memset(&stream_update, 0, sizeof(stream_update));
8560 
8561 		if (acrtc) {
8562 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8563 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8564 		}
8565 
8566 		/* Skip any modesets/resets */
8567 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8568 			continue;
8569 
8570 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8571 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8572 
8573 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8574 							     dm_old_con_state);
8575 
8576 		abm_changed = dm_new_crtc_state->abm_level !=
8577 			      dm_old_crtc_state->abm_level;
8578 
8579 		hdr_changed =
8580 			is_hdr_metadata_different(old_con_state, new_con_state);
8581 
8582 		if (!scaling_changed && !abm_changed && !hdr_changed)
8583 			continue;
8584 
8585 		stream_update.stream = dm_new_crtc_state->stream;
8586 		if (scaling_changed) {
8587 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8588 					dm_new_con_state, dm_new_crtc_state->stream);
8589 
8590 			stream_update.src = dm_new_crtc_state->stream->src;
8591 			stream_update.dst = dm_new_crtc_state->stream->dst;
8592 		}
8593 
8594 		if (abm_changed) {
8595 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8596 
8597 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8598 		}
8599 
8600 		if (hdr_changed) {
8601 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8602 			stream_update.hdr_static_metadata = &hdr_packet;
8603 		}
8604 
8605 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8606 		WARN_ON(!status);
8607 		WARN_ON(!status->plane_count);
8608 
8609 		/*
8610 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8611 		 * Here we create an empty update on each plane.
8612 		 * To fix this, DC should permit updating only stream properties.
8613 		 */
8614 		for (j = 0; j < status->plane_count; j++)
8615 			dummy_updates[j].surface = status->plane_states[0];
8616 
8618 		mutex_lock(&dm->dc_lock);
8619 		dc_commit_updates_for_stream(dm->dc,
8620 						     dummy_updates,
8621 						     status->plane_count,
8622 						     dm_new_crtc_state->stream,
8623 						     &stream_update,
8624 						     dc_state);
8625 		mutex_unlock(&dm->dc_lock);
8626 	}
8627 
8628 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8629 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8630 				      new_crtc_state, i) {
8631 		if (old_crtc_state->active && !new_crtc_state->active)
8632 			crtc_disable_count++;
8633 
8634 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8635 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8636 
8637 		/* For freesync config update on crtc state and params for irq */
8638 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8639 
8640 		/* Handle vrr on->off / off->on transitions */
8641 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8642 						dm_new_crtc_state);
8643 	}
8644 
	/*
8646 	 * Enable interrupts for CRTCs that are newly enabled or went through
8647 	 * a modeset. It was intentionally deferred until after the front end
8648 	 * state was modified to wait until the OTG was on and so the IRQ
8649 	 * handlers didn't access stale or invalid state.
8650 	 */
8651 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8652 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8653 
8654 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8655 
8656 		if (new_crtc_state->active &&
8657 		    (!old_crtc_state->active ||
8658 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8659 			dc_stream_retain(dm_new_crtc_state->stream);
8660 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8661 			manage_dm_interrupts(adev, acrtc, true);
8662 
8663 #ifdef CONFIG_DEBUG_FS
			/*
8665 			 * Frontend may have changed so reapply the CRC capture
8666 			 * settings for the stream.
8667 			 */
8668 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8669 
8670 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8671 				amdgpu_dm_crtc_configure_crc_source(
8672 					crtc, dm_new_crtc_state,
8673 					dm_new_crtc_state->crc_src);
8674 			}
8675 #endif
8676 		}
8677 	}
8678 
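	/* If any CRTC requested an async flip, don't wait for vblank. */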
8679 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8680 		if (new_crtc_state->async_flip)
8681 			wait_for_vblank = false;
8682 
8683 	/* update planes when needed per crtc*/
8684 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8685 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8686 
8687 		if (dm_new_crtc_state->stream)
8688 			amdgpu_dm_commit_planes(state, dc_state, dev,
8689 						dm, crtc, wait_for_vblank);
8690 	}
8691 
8692 	/* Update audio instances for each connector. */
8693 	amdgpu_dm_commit_audio(dev, state);
8694 
8695 	/*
8696 	 * send vblank event on all events not handled in flip and
8697 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8698 	 */
8699 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8700 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8701 
8702 		if (new_crtc_state->event)
8703 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8704 
8705 		new_crtc_state->event = NULL;
8706 	}
8707 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8708 
8709 	/* Signal HW programming completion */
8710 	drm_atomic_helper_commit_hw_done(state);
8711 
8712 	if (wait_for_vblank)
8713 		drm_atomic_helper_wait_for_flip_done(dev, state);
8714 
8715 	drm_atomic_helper_cleanup_planes(dev, state);
8716 
8717 	/* return the stolen vga memory back to VRAM */
8718 	if (!adev->mman.keep_stolen_vga_memory)
8719 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8720 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8721 
8722 	/*
8723 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8724 	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
8726 	 */
8727 	for (i = 0; i < crtc_disable_count; i++)
8728 		pm_runtime_put_autosuspend(dev->dev);
8729 	pm_runtime_mark_last_busy(dev->dev);
8730 
8731 	if (dc_state_temp)
8732 		dc_release_state(dc_state_temp);
8733 }
8734 
8736 static int dm_force_atomic_commit(struct drm_connector *connector)
8737 {
8738 	int ret = 0;
8739 	struct drm_device *ddev = connector->dev;
8740 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8741 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8742 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8743 	struct drm_connector_state *conn_state;
8744 	struct drm_crtc_state *crtc_state;
8745 	struct drm_plane_state *plane_state;
8746 
8747 	if (!state)
8748 		return -ENOMEM;
8749 
8750 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8751 
	/* Construct an atomic state to restore the previous display settings */
8753 
8754 	/*
8755 	 * Attach connectors to drm_atomic_state
8756 	 */
8757 	conn_state = drm_atomic_get_connector_state(state, connector);
8758 
8759 	ret = PTR_ERR_OR_ZERO(conn_state);
8760 	if (ret)
8761 		goto out;
8762 
	/* Attach crtc to drm_atomic_state */
8764 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8765 
8766 	ret = PTR_ERR_OR_ZERO(crtc_state);
8767 	if (ret)
8768 		goto out;
8769 
8770 	/* force a restore */
8771 	crtc_state->mode_changed = true;
8772 
8773 	/* Attach plane to drm_atomic_state */
8774 	plane_state = drm_atomic_get_plane_state(state, plane);
8775 
8776 	ret = PTR_ERR_OR_ZERO(plane_state);
8777 	if (ret)
8778 		goto out;
8779 
8780 	/* Call commit internally with the state we just constructed */
8781 	ret = drm_atomic_commit(state);
8782 
8783 out:
8784 	drm_atomic_state_put(state);
8785 	if (ret)
8786 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8787 
8788 	return ret;
8789 }
8790 
8791 /*
 * This function handles all cases in which a mode set does not come upon
 * hotplug. This includes when a display is unplugged and then plugged back
 * into the same port, and when running without usermode desktop manager
 * support.
8795  */
8796 void dm_restore_drm_connector_state(struct drm_device *dev,
8797 				    struct drm_connector *connector)
8798 {
8799 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8800 	struct amdgpu_crtc *disconnected_acrtc;
8801 	struct dm_crtc_state *acrtc_state;
8802 
8803 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8804 		return;
8805 
8806 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8807 	if (!disconnected_acrtc)
8808 		return;
8809 
8810 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8811 	if (!acrtc_state->stream)
8812 		return;
8813 
8814 	/*
8815 	 * If the previous sink is not released and different from the current,
	 * we deduce we are in a state where we cannot rely on a usermode call
	 * to turn on the display, so we do it here.
8818 	 */
8819 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8820 		dm_force_atomic_commit(&aconnector->base);
8821 }
8822 
8823 /*
 * Grabs all modesetting locks to serialize against any blocking commits and
 * waits for completion of all non-blocking commits.
8826  */
static int do_acquire_global_lock(struct drm_device *dev,
8828 				 struct drm_atomic_state *state)
8829 {
8830 	struct drm_crtc *crtc;
8831 	struct drm_crtc_commit *commit;
8832 	long ret;
8833 
8834 	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are taking here will get released too.
8838 	 */
8839 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8840 	if (ret)
8841 		return ret;
8842 
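	/*
	 * For each CRTC, grab a reference to its most recent commit (if any)
	 * and wait for its hardware programming and page flip to complete.
	 */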
8843 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8844 		spin_lock(&crtc->commit_lock);
8845 		commit = list_first_entry_or_null(&crtc->commit_list,
8846 				struct drm_crtc_commit, commit_entry);
8847 		if (commit)
8848 			drm_crtc_commit_get(commit);
8849 		spin_unlock(&crtc->commit_lock);
8850 
8851 		if (!commit)
8852 			continue;
8853 
8854 		/*
		 * Make sure all pending HW programming has completed and
		 * all page flips are done.
8857 		 */
8858 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8859 
8860 		if (ret > 0)
8861 			ret = wait_for_completion_interruptible_timeout(
8862 					&commit->flip_done, 10*HZ);
8863 
8864 		if (ret == 0)
8865 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8866 				  "timed out\n", crtc->base.id, crtc->name);
8867 
8868 		drm_crtc_commit_put(commit);
8869 	}
8870 
8871 	return ret < 0 ? ret : 0;
8872 }
8873 
8874 static void get_freesync_config_for_crtc(
8875 	struct dm_crtc_state *new_crtc_state,
8876 	struct dm_connector_state *new_con_state)
8877 {
8878 	struct mod_freesync_config config = {0};
8879 	struct amdgpu_dm_connector *aconnector =
8880 			to_amdgpu_dm_connector(new_con_state->base.connector);
8881 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8882 	int vrefresh = drm_mode_vrefresh(mode);
8883 	bool fs_vid_mode = false;
8884 
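	/*
	 * VRR is only considered supported when the sink reports FreeSync
	 * capability and the current mode's refresh rate falls within the
	 * monitor's supported range.
	 */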
8885 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8886 					vrefresh >= aconnector->min_vfreq &&
8887 					vrefresh <= aconnector->max_vfreq;
8888 
8889 	if (new_crtc_state->vrr_supported) {
8890 		new_crtc_state->stream->ignore_msa_timing_param = true;
8891 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8892 
8893 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8894 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8895 		config.vsif_supported = true;
8896 		config.btr = true;
8897 
8898 		if (fs_vid_mode) {
8899 			config.state = VRR_STATE_ACTIVE_FIXED;
8900 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8901 			goto out;
8902 		} else if (new_crtc_state->base.vrr_enabled) {
8903 			config.state = VRR_STATE_ACTIVE_VARIABLE;
8904 		} else {
8905 			config.state = VRR_STATE_INACTIVE;
8906 		}
8907 	}
8908 out:
8909 	new_crtc_state->freesync_config = config;
8910 }
8911 
8912 static void reset_freesync_config_for_crtc(
8913 	struct dm_crtc_state *new_crtc_state)
8914 {
8915 	new_crtc_state->vrr_supported = false;
8916 
8917 	memset(&new_crtc_state->vrr_infopacket, 0,
8918 	       sizeof(new_crtc_state->vrr_infopacket));
8919 }
8920 
8921 static bool
8922 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
8923 				 struct drm_crtc_state *new_crtc_state)
8924 {
8925 	struct drm_display_mode old_mode, new_mode;
8926 
8927 	if (!old_crtc_state || !new_crtc_state)
8928 		return false;
8929 
8930 	old_mode = old_crtc_state->mode;
8931 	new_mode = new_crtc_state->mode;
8932 
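	/*
	 * Treat the timing as unchanged (for FreeSync purposes) only when the
	 * modes differ solely in the vertical front porch: vtotal, vsync_start
	 * and vsync_end change while the vsync pulse width and all horizontal
	 * parameters stay identical.
	 */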
8933 	if (old_mode.clock       == new_mode.clock &&
8934 	    old_mode.hdisplay    == new_mode.hdisplay &&
8935 	    old_mode.vdisplay    == new_mode.vdisplay &&
8936 	    old_mode.htotal      == new_mode.htotal &&
8937 	    old_mode.vtotal      != new_mode.vtotal &&
8938 	    old_mode.hsync_start == new_mode.hsync_start &&
8939 	    old_mode.vsync_start != new_mode.vsync_start &&
8940 	    old_mode.hsync_end   == new_mode.hsync_end &&
8941 	    old_mode.vsync_end   != new_mode.vsync_end &&
8942 	    old_mode.hskew       == new_mode.hskew &&
8943 	    old_mode.vscan       == new_mode.vscan &&
8944 	    (old_mode.vsync_end - old_mode.vsync_start) ==
8945 	    (new_mode.vsync_end - new_mode.vsync_start))
8946 		return true;
8947 
8948 	return false;
8949 }
8950 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
8952 	uint64_t num, den, res;
8953 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8954 
8955 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8956 
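	/*
	 * Fixed refresh rate in uHz: mode.clock is in kHz, so multiply by
	 * 1000 (-> Hz) and by 1000000 (-> uHz) before dividing by the total
	 * pixel count per frame (htotal * vtotal).
	 */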
8957 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8958 	den = (unsigned long long)new_crtc_state->mode.htotal *
8959 	      (unsigned long long)new_crtc_state->mode.vtotal;
8960 
8961 	res = div_u64(num, den);
8962 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8963 }
8964 
8965 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8966 				struct drm_atomic_state *state,
8967 				struct drm_crtc *crtc,
8968 				struct drm_crtc_state *old_crtc_state,
8969 				struct drm_crtc_state *new_crtc_state,
8970 				bool enable,
8971 				bool *lock_and_validation_needed)
8972 {
8973 	struct dm_atomic_state *dm_state = NULL;
8974 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8975 	struct dc_stream_state *new_stream;
8976 	int ret = 0;
8977 
8978 	/*
8979 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8980 	 * update changed items
8981 	 */
8982 	struct amdgpu_crtc *acrtc = NULL;
8983 	struct amdgpu_dm_connector *aconnector = NULL;
8984 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8985 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8986 
8987 	new_stream = NULL;
8988 
8989 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8990 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8991 	acrtc = to_amdgpu_crtc(crtc);
8992 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8993 
8994 	/* TODO This hack should go away */
8995 	if (aconnector && enable) {
8996 		/* Make sure fake sink is created in plug-in scenario */
8997 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8998 							    &aconnector->base);
8999 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9000 							    &aconnector->base);
9001 
9002 		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR(drm_new_conn_state);
9004 			goto fail;
9005 		}
9006 
9007 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9008 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9009 
9010 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9011 			goto skip_modeset;
9012 
9013 		new_stream = create_validate_stream_for_sink(aconnector,
9014 							     &new_crtc_state->mode,
9015 							     dm_new_conn_state,
9016 							     dm_old_crtc_state->stream);
9017 
9018 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error. The OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
9023 		 */
9024 
9025 		if (!new_stream) {
9026 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9027 					__func__, acrtc->base.base.id);
9028 			ret = -ENOMEM;
9029 			goto fail;
9030 		}
9031 
9032 		/*
9033 		 * TODO: Check VSDB bits to decide whether this should
9034 		 * be enabled or not.
9035 		 */
9036 		new_stream->triggered_crtc_reset.enabled =
9037 			dm->force_timing_sync;
9038 
9039 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9040 
9041 		ret = fill_hdr_info_packet(drm_new_conn_state,
9042 					   &new_stream->hdr_static_metadata);
9043 		if (ret)
9044 			goto fail;
9045 
9046 		/*
9047 		 * If we already removed the old stream from the context
9048 		 * (and set the new stream to NULL) then we can't reuse
9049 		 * the old stream even if the stream and scaling are unchanged.
9050 		 * We'll hit the BUG_ON and black screen.
9051 		 *
9052 		 * TODO: Refactor this function to allow this check to work
9053 		 * in all conditions.
9054 		 */
9055 		if (amdgpu_freesync_vid_mode &&
9056 		    dm_new_crtc_state->stream &&
9057 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9058 			goto skip_modeset;
9059 
9060 		if (dm_new_crtc_state->stream &&
9061 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9062 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9063 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9065 					 new_crtc_state->mode_changed);
9066 		}
9067 	}
9068 
9069 	/* mode_changed flag may get updated above, need to check again */
9070 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9071 		goto skip_modeset;
9072 
9073 	DRM_DEBUG_DRIVER(
9074 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9076 		"connectors_changed:%d\n",
9077 		acrtc->crtc_id,
9078 		new_crtc_state->enable,
9079 		new_crtc_state->active,
9080 		new_crtc_state->planes_changed,
9081 		new_crtc_state->mode_changed,
9082 		new_crtc_state->active_changed,
9083 		new_crtc_state->connectors_changed);
9084 
9085 	/* Remove stream for any changed/disabled CRTC */
9086 	if (!enable) {
9088 		if (!dm_old_crtc_state->stream)
9089 			goto skip_modeset;
9090 
9091 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9092 		    is_timing_unchanged_for_freesync(new_crtc_state,
9093 						     old_crtc_state)) {
9094 			new_crtc_state->mode_changed = false;
9095 			DRM_DEBUG_DRIVER(
9096 				"Mode change not required for front porch change, "
				"setting mode_changed to %d\n",
9098 				new_crtc_state->mode_changed);
9099 
9100 			set_freesync_fixed_config(dm_new_crtc_state);
9101 
9102 			goto skip_modeset;
9103 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9104 			   is_freesync_video_mode(&new_crtc_state->mode,
9105 						  aconnector)) {
9106 			set_freesync_fixed_config(dm_new_crtc_state);
9107 		}
9108 
9109 		ret = dm_atomic_get_state(state, &dm_state);
9110 		if (ret)
9111 			goto fail;
9112 
9113 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9114 				crtc->base.id);
9115 
9116 		/* i.e. reset mode */
9117 		if (dc_remove_stream_from_ctx(
9118 				dm->dc,
9119 				dm_state->context,
9120 				dm_old_crtc_state->stream) != DC_OK) {
9121 			ret = -EINVAL;
9122 			goto fail;
9123 		}
9124 
9125 		dc_stream_release(dm_old_crtc_state->stream);
9126 		dm_new_crtc_state->stream = NULL;
9127 
9128 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9129 
9130 		*lock_and_validation_needed = true;
9131 
9132 	} else {/* Add stream for any updated/enabled CRTC */
9133 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of this.
9137 		 */
9138 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9139 			goto skip_modeset;
9140 
9141 		if (modereset_required(new_crtc_state))
9142 			goto skip_modeset;
9143 
9144 		if (modeset_required(new_crtc_state, new_stream,
9145 				     dm_old_crtc_state->stream)) {
9147 			WARN_ON(dm_new_crtc_state->stream);
9148 
9149 			ret = dm_atomic_get_state(state, &dm_state);
9150 			if (ret)
9151 				goto fail;
9152 
9153 			dm_new_crtc_state->stream = new_stream;
9154 
9155 			dc_stream_retain(new_stream);
9156 
9157 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9158 						crtc->base.id);
9159 
9160 			if (dc_add_stream_to_ctx(
9161 					dm->dc,
9162 					dm_state->context,
9163 					dm_new_crtc_state->stream) != DC_OK) {
9164 				ret = -EINVAL;
9165 				goto fail;
9166 			}
9167 
9168 			*lock_and_validation_needed = true;
9169 		}
9170 	}
9171 
9172 skip_modeset:
9173 	/* Release extra reference */
9174 	if (new_stream)
		dc_stream_release(new_stream);
9176 
9177 	/*
9178 	 * We want to do dc stream updates that do not require a
9179 	 * full modeset below.
9180 	 */
9181 	if (!(enable && aconnector && new_crtc_state->active))
9182 		return 0;
9183 	/*
	 * Given the above conditions, the dc stream state cannot be NULL:
	 * 1. we're in the process of enabling the CRTC (it has just been
	 *    added to the dc context, or is already on the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
	 * => The dc stream state currently exists.
9190 	 */
9191 	BUG_ON(dm_new_crtc_state->stream == NULL);
9192 
9193 	/* Scaling or underscan settings */
9194 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9195 		update_stream_scaling_settings(
9196 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9197 
9198 	/* ABM settings */
9199 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9200 
9201 	/*
9202 	 * Color management settings. We also update color properties
9203 	 * when a modeset is needed, to ensure it gets reprogrammed.
9204 	 */
9205 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9206 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9207 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9208 		if (ret)
9209 			goto fail;
9210 	}
9211 
9212 	/* Update Freesync settings. */
9213 	get_freesync_config_for_crtc(dm_new_crtc_state,
9214 				     dm_new_conn_state);
9215 
9216 	return ret;
9217 
9218 fail:
9219 	if (new_stream)
9220 		dc_stream_release(new_stream);
9221 	return ret;
9222 }
9223 
9224 static bool should_reset_plane(struct drm_atomic_state *state,
9225 			       struct drm_plane *plane,
9226 			       struct drm_plane_state *old_plane_state,
9227 			       struct drm_plane_state *new_plane_state)
9228 {
9229 	struct drm_plane *other;
9230 	struct drm_plane_state *old_other_state, *new_other_state;
9231 	struct drm_crtc_state *new_crtc_state;
9232 	int i;
9233 
9234 	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
9237 	 * the stream.
9238 	 */
9239 	if (state->allow_modeset)
9240 		return true;
9241 
9242 	/* Exit early if we know that we're adding or removing the plane. */
9243 	if (old_plane_state->crtc != new_plane_state->crtc)
9244 		return true;
9245 
9246 	/* old crtc == new_crtc == NULL, plane not in context. */
9247 	if (!new_plane_state->crtc)
9248 		return false;
9249 
9250 	new_crtc_state =
9251 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9252 
9253 	if (!new_crtc_state)
9254 		return true;
9255 
9256 	/* CRTC Degamma changes currently require us to recreate planes. */
9257 	if (new_crtc_state->color_mgmt_changed)
9258 		return true;
9259 
9260 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9261 		return true;
9262 
9263 	/*
9264 	 * If there are any new primary or overlay planes being added or
9265 	 * removed then the z-order can potentially change. To ensure
9266 	 * correct z-order and pipe acquisition the current DC architecture
9267 	 * requires us to remove and recreate all existing planes.
9268 	 *
9269 	 * TODO: Come up with a more elegant solution for this.
9270 	 */
9271 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

9273 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9274 			continue;
9275 
9276 		if (old_other_state->crtc != new_plane_state->crtc &&
9277 		    new_other_state->crtc != new_plane_state->crtc)
9278 			continue;
9279 
9280 		if (old_other_state->crtc != new_other_state->crtc)
9281 			return true;
9282 
9283 		/* Src/dst size and scaling updates. */
9284 		if (old_other_state->src_w != new_other_state->src_w ||
9285 		    old_other_state->src_h != new_other_state->src_h ||
9286 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9287 		    old_other_state->crtc_h != new_other_state->crtc_h)
9288 			return true;
9289 
9290 		/* Rotation / mirroring updates. */
9291 		if (old_other_state->rotation != new_other_state->rotation)
9292 			return true;
9293 
9294 		/* Blending updates. */
9295 		if (old_other_state->pixel_blend_mode !=
9296 		    new_other_state->pixel_blend_mode)
9297 			return true;
9298 
9299 		/* Alpha updates. */
9300 		if (old_other_state->alpha != new_other_state->alpha)
9301 			return true;
9302 
9303 		/* Colorspace changes. */
9304 		if (old_other_state->color_range != new_other_state->color_range ||
9305 		    old_other_state->color_encoding != new_other_state->color_encoding)
9306 			return true;
9307 
9308 		/* Framebuffer checks fall at the end. */
9309 		if (!old_other_state->fb || !new_other_state->fb)
9310 			continue;
9311 
9312 		/* Pixel format changes can require bandwidth updates. */
9313 		if (old_other_state->fb->format != new_other_state->fb->format)
9314 			return true;
9315 
9316 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9317 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9318 
9319 		/* Tiling and DCC changes also require bandwidth updates. */
9320 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9321 		    old_afb->base.modifier != new_afb->base.modifier)
9322 			return true;
9323 	}
9324 
9325 	return false;
9326 }
9327 
9328 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9329 			      struct drm_plane_state *new_plane_state,
9330 			      struct drm_framebuffer *fb)
9331 {
9332 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9333 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9334 	unsigned int pitch;
9335 	bool linear;
9336 
9337 	if (fb->width > new_acrtc->max_cursor_width ||
9338 	    fb->height > new_acrtc->max_cursor_height) {
9339 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9340 				 new_plane_state->fb->width,
9341 				 new_plane_state->fb->height);
9342 		return -EINVAL;
9343 	}
9344 	if (new_plane_state->src_w != fb->width << 16 ||
9345 	    new_plane_state->src_h != fb->height << 16) {
9346 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9347 		return -EINVAL;
9348 	}
9349 
9350 	/* Pitch in pixels */
9351 	pitch = fb->pitches[0] / fb->format->cpp[0];
9352 
9353 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9355 				 fb->width, pitch);
9356 		return -EINVAL;
9357 	}
9358 
9359 	switch (pitch) {
9360 	case 64:
9361 	case 128:
9362 	case 256:
9363 		/* FB pitch is supported by cursor plane */
9364 		break;
9365 	default:
9366 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9367 		return -EINVAL;
9368 	}
9369 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9372 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9373 		if (adev->family < AMDGPU_FAMILY_AI) {
9374 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9376 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9377 		} else {
9378 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9379 		}
9380 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9382 			return -EINVAL;
9383 		}
9384 	}
9385 
9386 	return 0;
9387 }
9388 
9389 static int dm_update_plane_state(struct dc *dc,
9390 				 struct drm_atomic_state *state,
9391 				 struct drm_plane *plane,
9392 				 struct drm_plane_state *old_plane_state,
9393 				 struct drm_plane_state *new_plane_state,
9394 				 bool enable,
9395 				 bool *lock_and_validation_needed)
9396 {
9398 	struct dm_atomic_state *dm_state = NULL;
9399 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9400 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9401 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9402 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9403 	struct amdgpu_crtc *new_acrtc;
9404 	bool needs_reset;
9405 	int ret = 0;
9406 
9407 
9409 	old_plane_crtc = old_plane_state->crtc;
9410 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9411 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9412 
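	/*
	 * Cursor planes have no DC plane state of their own; validate the
	 * cursor framebuffer and position constraints here and return early.
	 */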
9413 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9414 		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
9416 			return 0;
9417 
9418 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9419 
9420 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9421 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9422 			return -EINVAL;
9423 		}
9424 
9425 		if (new_plane_state->fb) {
9426 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9427 						 new_plane_state->fb);
9428 			if (ret)
9429 				return ret;
9430 		}
9431 
9432 		return 0;
9433 	}
9434 
9435 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9436 					 new_plane_state);
9437 
9438 	/* Remove any changed/removed planes */
9439 	if (!enable) {
9440 		if (!needs_reset)
9441 			return 0;
9442 
9443 		if (!old_plane_crtc)
9444 			return 0;
9445 
9446 		old_crtc_state = drm_atomic_get_old_crtc_state(
9447 				state, old_plane_crtc);
9448 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9449 
9450 		if (!dm_old_crtc_state->stream)
9451 			return 0;
9452 
9453 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9454 				plane->base.id, old_plane_crtc->base.id);
9455 
9456 		ret = dm_atomic_get_state(state, &dm_state);
9457 		if (ret)
9458 			return ret;
9459 
9460 		if (!dc_remove_plane_from_context(
9461 				dc,
9462 				dm_old_crtc_state->stream,
9463 				dm_old_plane_state->dc_state,
9464 				dm_state->context)) {
9466 			return -EINVAL;
9467 		}
9468 
9470 		dc_plane_state_release(dm_old_plane_state->dc_state);
9471 		dm_new_plane_state->dc_state = NULL;
9472 
9473 		*lock_and_validation_needed = true;
9474 
9475 	} else { /* Add new planes */
9476 		struct dc_plane_state *dc_new_plane_state;
9477 
9478 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9479 			return 0;
9480 
9481 		if (!new_plane_crtc)
9482 			return 0;
9483 
9484 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9485 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9486 
9487 		if (!dm_new_crtc_state->stream)
9488 			return 0;
9489 
9490 		if (!needs_reset)
9491 			return 0;
9492 
9493 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9494 		if (ret)
9495 			return ret;
9496 
9497 		WARN_ON(dm_new_plane_state->dc_state);
9498 
9499 		dc_new_plane_state = dc_create_plane_state(dc);
9500 		if (!dc_new_plane_state)
9501 			return -ENOMEM;
9502 
9503 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9504 				plane->base.id, new_plane_crtc->base.id);
9505 
9506 		ret = fill_dc_plane_attributes(
9507 			drm_to_adev(new_plane_crtc->dev),
9508 			dc_new_plane_state,
9509 			new_plane_state,
9510 			new_crtc_state);
9511 		if (ret) {
9512 			dc_plane_state_release(dc_new_plane_state);
9513 			return ret;
9514 		}
9515 
9516 		ret = dm_atomic_get_state(state, &dm_state);
9517 		if (ret) {
9518 			dc_plane_state_release(dc_new_plane_state);
9519 			return ret;
9520 		}
9521 
9522 		/*
9523 		 * Any atomic check errors that occur after this will
9524 		 * not need a release. The plane state will be attached
9525 		 * to the stream, and therefore part of the atomic
9526 		 * state. It'll be released when the atomic state is
9527 		 * cleaned.
9528 		 */
9529 		if (!dc_add_plane_to_context(
9530 				dc,
9531 				dm_new_crtc_state->stream,
9532 				dc_new_plane_state,
9533 				dm_state->context)) {
9535 			dc_plane_state_release(dc_new_plane_state);
9536 			return -EINVAL;
9537 		}
9538 
9539 		dm_new_plane_state->dc_state = dc_new_plane_state;
9540 
9541 		/* Tell DC to do a full surface update every time there
9542 		 * is a plane change. Inefficient, but works for now.
9543 		 */
9544 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9545 
9546 		*lock_and_validation_needed = true;
9547 	}
9548 
9550 	return ret;
9551 }
9552 
9553 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9554 				struct drm_crtc *crtc,
9555 				struct drm_crtc_state *new_crtc_state)
9556 {
9557 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9558 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9559 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
9564 
9565 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9566 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;
9570 
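	/*
	 * Compare the effective scaling factors as integers scaled by 1000
	 * (src coordinates are 16.16 fixed point) to avoid floating point
	 * in the kernel.
	 */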
9571 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9572 			 (new_cursor_state->src_w >> 16);
9573 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9574 			 (new_cursor_state->src_h >> 16);
9575 
9576 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9577 			 (new_primary_state->src_w >> 16);
9578 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9579 			 (new_primary_state->src_h >> 16);
9580 
9581 	if (cursor_scale_w != primary_scale_w ||
9582 	    cursor_scale_h != primary_scale_h) {
9583 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9584 		return -EINVAL;
9585 	}
9586 
9587 	return 0;
9588 }
9589 
9590 #if defined(CONFIG_DRM_AMD_DC_DCN)
9591 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9592 {
9593 	struct drm_connector *connector;
9594 	struct drm_connector_state *conn_state;
9595 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

9597 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9598 		if (conn_state->crtc != crtc)
9599 			continue;
9600 
9601 		aconnector = to_amdgpu_dm_connector(connector);
9602 		if (!aconnector->port || !aconnector->mst_port)
9603 			aconnector = NULL;
9604 		else
9605 			break;
9606 	}
9607 
9608 	if (!aconnector)
9609 		return 0;
9610 
9611 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9612 }
9613 #endif
9614 
9615 /**
9616  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9617  * @dev: The DRM device
9618  * @state: The atomic state to commit
9619  *
9620  * Validate that the given atomic state is programmable by DC into hardware.
9621  * This involves constructing a &struct dc_state reflecting the new hardware
9622  * state we wish to commit, then querying DC to see if it is programmable. It's
9623  * important not to modify the existing DC state. Otherwise, atomic_check
9624  * may unexpectedly commit hardware changes.
9625  *
9626  * When validating the DC state, it's important that the right locks are
9627  * acquired. For full updates case which removes/adds/updates streams on one
9628  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9629  * that any such full update commit will wait for completion of any outstanding
9630  * flip using DRMs synchronization events.
9631  *
9632  * Note that DM adds the affected connectors for all CRTCs in state, when that
9633  * might not seem necessary. This is because DC stream creation requires the
9634  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9635  * be possible but non-trivial - a possible TODO item.
9636  *
 * Return: 0 on success, or a negative error code if validation failed.
9638  */
9639 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9640 				  struct drm_atomic_state *state)
9641 {
9642 	struct amdgpu_device *adev = drm_to_adev(dev);
9643 	struct dm_atomic_state *dm_state = NULL;
9644 	struct dc *dc = adev->dm.dc;
9645 	struct drm_connector *connector;
9646 	struct drm_connector_state *old_con_state, *new_con_state;
9647 	struct drm_crtc *crtc;
9648 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9649 	struct drm_plane *plane;
9650 	struct drm_plane_state *old_plane_state, *new_plane_state;
9651 	enum dc_status status;
9652 	int ret, i;
9653 	bool lock_and_validation_needed = false;
9654 	struct dm_crtc_state *dm_old_crtc_state;
9655 
9656 	trace_amdgpu_dm_atomic_check_begin(state);
9657 
9658 	ret = drm_atomic_helper_check_modeset(dev, state);
9659 	if (ret)
9660 		goto fail;
9661 
9662 	/* Check connector changes */
9663 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9664 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9665 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9666 
9667 		/* Skip connectors that are disabled or part of modeset already. */
9668 		if (!old_con_state->crtc && !new_con_state->crtc)
9669 			continue;
9670 
9671 		if (!new_con_state->crtc)
9672 			continue;
9673 
9674 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9675 		if (IS_ERR(new_crtc_state)) {
9676 			ret = PTR_ERR(new_crtc_state);
9677 			goto fail;
9678 		}
9679 
9680 		if (dm_old_con_state->abm_level !=
9681 		    dm_new_con_state->abm_level)
9682 			new_crtc_state->connectors_changed = true;
9683 	}
9684 
9685 #if defined(CONFIG_DRM_AMD_DC_DCN)
9686 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9687 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9688 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9689 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9690 				if (ret)
9691 					goto fail;
9692 			}
9693 		}
9694 	}
9695 #endif
9696 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9697 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9698 
9699 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9700 		    !new_crtc_state->color_mgmt_changed &&
9701 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
9703 			continue;
9704 
9705 		if (!new_crtc_state->enable)
9706 			continue;
9707 
9708 		ret = drm_atomic_add_affected_connectors(state, crtc);
9709 		if (ret)
			goto fail;
9711 
9712 		ret = drm_atomic_add_affected_planes(state, crtc);
9713 		if (ret)
9714 			goto fail;
9715 
9716 		if (dm_old_crtc_state->dsc_force_changed)
9717 			new_crtc_state->mode_changed = true;
9718 	}
9719 
9720 	/*
9721 	 * Add all primary and overlay planes on the CRTC to the state
9722 	 * whenever a plane is enabled to maintain correct z-ordering
9723 	 * and to enable fast surface updates.
9724 	 */
9725 	drm_for_each_crtc(crtc, dev) {
9726 		bool modified = false;
9727 
9728 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9729 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9730 				continue;
9731 
9732 			if (new_plane_state->crtc == crtc ||
9733 			    old_plane_state->crtc == crtc) {
9734 				modified = true;
9735 				break;
9736 			}
9737 		}
9738 
9739 		if (!modified)
9740 			continue;
9741 
9742 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9743 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9744 				continue;
9745 
9746 			new_plane_state =
9747 				drm_atomic_get_plane_state(state, plane);
9748 
9749 			if (IS_ERR(new_plane_state)) {
9750 				ret = PTR_ERR(new_plane_state);
9751 				goto fail;
9752 			}
9753 		}
9754 	}
9755 
	/* Remove existing planes if they are modified */
9757 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9758 		ret = dm_update_plane_state(dc, state, plane,
9759 					    old_plane_state,
9760 					    new_plane_state,
9761 					    false,
9762 					    &lock_and_validation_needed);
9763 		if (ret)
9764 			goto fail;
9765 	}
9766 
9767 	/* Disable all crtcs which require disable */
9768 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9769 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9770 					   old_crtc_state,
9771 					   new_crtc_state,
9772 					   false,
9773 					   &lock_and_validation_needed);
9774 		if (ret)
9775 			goto fail;
9776 	}
9777 
9778 	/* Enable all crtcs which require enable */
9779 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9780 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9781 					   old_crtc_state,
9782 					   new_crtc_state,
9783 					   true,
9784 					   &lock_and_validation_needed);
9785 		if (ret)
9786 			goto fail;
9787 	}
9788 
9789 	/* Add new/modified planes */
9790 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9791 		ret = dm_update_plane_state(dc, state, plane,
9792 					    old_plane_state,
9793 					    new_plane_state,
9794 					    true,
9795 					    &lock_and_validation_needed);
9796 		if (ret)
9797 			goto fail;
9798 	}
9799 
9800 	/* Run this here since we want to validate the streams we created */
9801 	ret = drm_atomic_helper_check_planes(dev, state);
9802 	if (ret)
9803 		goto fail;
9804 
9805 	/* Check cursor planes scaling */
9806 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9807 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9808 		if (ret)
9809 			goto fail;
9810 	}
9811 
9812 	if (state->legacy_cursor_update) {
9813 		/*
9814 		 * This is a fast cursor update coming from the plane update
9815 		 * helper, check if it can be done asynchronously for better
9816 		 * performance.
9817 		 */
9818 		state->async_update =
9819 			!drm_atomic_helper_async_check(dev, state);
9820 
9821 		/*
9822 		 * Skip the remaining global validation if this is an async
9823 		 * update. Cursor updates can be done without affecting
9824 		 * state or bandwidth calcs and this avoids the performance
9825 		 * penalty of locking the private state object and
9826 		 * allocating a new dc_state.
9827 		 */
9828 		if (state->async_update)
9829 			return 0;
9830 	}
9831 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
9835 	 * decide how to handle.
9836 	 */
9837 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9838 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9839 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9840 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9841 
9842 		/* Skip any modesets/resets */
9843 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9844 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9845 			continue;
9846 
		/* Skip anything that is not a scaling or underscan change */
9848 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9849 			continue;
9850 
9851 		lock_and_validation_needed = true;
9852 	}
9853 
	/*
9855 	 * Streams and planes are reset when there are changes that affect
9856 	 * bandwidth. Anything that affects bandwidth needs to go through
9857 	 * DC global validation to ensure that the configuration can be applied
9858 	 * to hardware.
9859 	 *
9860 	 * We have to currently stall out here in atomic_check for outstanding
9861 	 * commits to finish in this case because our IRQ handlers reference
9862 	 * DRM state directly - we can end up disabling interrupts too early
9863 	 * if we don't.
9864 	 *
9865 	 * TODO: Remove this stall and drop DM state private objects.
9866 	 */
9867 	if (lock_and_validation_needed) {
9868 		ret = dm_atomic_get_state(state, &dm_state);
9869 		if (ret)
9870 			goto fail;
9871 
		ret = do_acquire_global_lock(dev, state);
9873 		if (ret)
9874 			goto fail;
9875 
9876 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9879 
9880 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9881 		if (ret)
9882 			goto fail;
9883 #endif
9884 
9885 		/*
9886 		 * Perform validation of MST topology in the state:
9887 		 * We need to perform MST atomic check before calling
9888 		 * dc_validate_global_state(), or there is a chance
9889 		 * to get stuck in an infinite loop and hang eventually.
9890 		 */
9891 		ret = drm_dp_mst_atomic_check(state);
9892 		if (ret)
9893 			goto fail;
9894 		status = dc_validate_global_state(dc, dm_state->context, false);
9895 		if (status != DC_OK) {
9896 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9897 				       dc_status_to_str(status), status);
9898 			ret = -EINVAL;
9899 			goto fail;
9900 		}
9901 	} else {
9902 		/*
9903 		 * The commit is a fast update. Fast updates shouldn't change
9904 		 * the DC context, affect global validation, and can have their
9905 		 * commit work done in parallel with other commits not touching
9906 		 * the same resource. If we have a new DC context as part of
9907 		 * the DM atomic state from validation we need to free it and
9908 		 * retain the existing one instead.
9909 		 *
9910 		 * Furthermore, since the DM atomic state only contains the DC
9911 		 * context and can safely be annulled, we can free the state
9912 		 * and clear the associated private object now to free
9913 		 * some memory and avoid a possible use-after-free later.
9914 		 */
9915 
9916 		for (i = 0; i < state->num_private_objs; i++) {
9917 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9918 
9919 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9921 
9922 				dm_atomic_destroy_state(obj,
9923 						state->private_objs[i].state);
9924 
9925 				/* If i is not at the end of the array then the
9926 				 * last element needs to be moved to where i was
9927 				 * before the array can safely be truncated.
9928 				 */
9929 				if (i != j)
9930 					state->private_objs[i] =
9931 						state->private_objs[j];
9932 
9933 				state->private_objs[j].ptr = NULL;
9934 				state->private_objs[j].state = NULL;
9935 				state->private_objs[j].old_state = NULL;
9936 				state->private_objs[j].new_state = NULL;
9937 
9938 				state->num_private_objs = j;
9939 				break;
9940 			}
9941 		}
9942 	}
9943 
9944 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9946 		struct dm_crtc_state *dm_new_crtc_state =
9947 			to_dm_crtc_state(new_crtc_state);
9948 
9949 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9950 							 UPDATE_TYPE_FULL :
9951 							 UPDATE_TYPE_FAST;
9952 	}
9953 
	/* Must have succeeded */
9955 	WARN_ON(ret);
9956 
9957 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9958 
9959 	return ret;
9960 
9961 fail:
9962 	if (ret == -EDEADLK)
9963 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9964 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9965 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9966 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9968 
9969 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9970 
9971 	return ret;
9972 }
9973 
9974 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9975 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9976 {
9977 	uint8_t dpcd_data;
9978 	bool capable = false;
9979 
9980 	if (amdgpu_dm_connector->dc_link &&
9981 		dm_helpers_dp_read_dpcd(
9982 				NULL,
9983 				amdgpu_dm_connector->dc_link,
9984 				DP_DOWN_STREAM_PORT_COUNT,
9985 				&dpcd_data,
9986 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9988 	}
9989 
9990 	return capable;
9991 }
9992 
9993 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9994 		uint8_t *edid_ext, int len,
9995 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9996 {
9997 	int i;
9998 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9999 	struct dc *dc = adev->dm.dc;
10000 
10001 	/* send extension block to DMCU for parsing */
10002 	for (i = 0; i < len; i += 8) {
10003 		bool res;
10004 		int offset;
10005 
		/* send 8 bytes at a time */
10007 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10008 			return false;
10009 
		if (i + 8 == len) {
			/* EDID block sending completed, expect result */
10012 			int version, min_rate, max_rate;
10013 
10014 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10015 			if (res) {
10016 				/* amd vsdb found */
10017 				vsdb_info->freesync_supported = 1;
10018 				vsdb_info->amd_vsdb_version = version;
10019 				vsdb_info->min_refresh_rate_hz = min_rate;
10020 				vsdb_info->max_refresh_rate_hz = max_rate;
10021 				return true;
10022 			}
10023 			/* not amd vsdb */
10024 			return false;
10025 		}
10026 
		/* check for ack */
10028 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10029 		if (!res)
10030 			return false;
10031 	}
10032 
10033 	return false;
10034 }
10035 
10036 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10037 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10038 {
10039 	uint8_t *edid_ext = NULL;
10040 	int i;
10041 	bool valid_vsdb_found = false;
10042 
10043 	/*----- drm_find_cea_extension() -----*/
10044 	/* No EDID or EDID extensions */
10045 	if (edid == NULL || edid->extensions == 0)
10046 		return -ENODEV;
10047 
10048 	/* Find CEA extension */
10049 	for (i = 0; i < edid->extensions; i++) {
10050 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10051 		if (edid_ext[0] == CEA_EXT)
10052 			break;
10053 	}
10054 
10055 	if (i == edid->extensions)
10056 		return -ENODEV;
10057 
10058 	/*----- cea_db_offsets() -----*/
10059 	if (edid_ext[0] != CEA_EXT)
10060 		return -ENODEV;
10061 
10062 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10063 
10064 	return valid_vsdb_found ? i : -ENODEV;
10065 }
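
/*
 * EDID layout assumed above: the base block is EDID_LENGTH (128) bytes
 * and extension block i starts at byte offset 128 * (i + 1), so for a
 * typical one-extension EDID the CEA-861 block (tag CEA_EXT, 0x02) sits
 * at offset 128.
 */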
10066 
10067 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10068 					struct edid *edid)
10069 {
10070 	int i = 0;
10071 	struct detailed_timing *timing;
10072 	struct detailed_non_pixel *data;
10073 	struct detailed_data_monitor_range *range;
10074 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10075 			to_amdgpu_dm_connector(connector);
10076 	struct dm_connector_state *dm_con_state = NULL;
10077 
10078 	struct drm_device *dev = connector->dev;
10079 	struct amdgpu_device *adev = drm_to_adev(dev);
10080 	bool freesync_capable = false;
10081 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10082 
10083 	if (!connector->state) {
10084 		DRM_ERROR("%s - Connector has no state\n", __func__);
10085 		goto update;
10086 	}
10087 
10088 	if (!edid) {
10089 		dm_con_state = to_dm_connector_state(connector->state);
10090 
10091 		amdgpu_dm_connector->min_vfreq = 0;
10092 		amdgpu_dm_connector->max_vfreq = 0;
10093 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10094 
10095 		goto update;
10096 	}
10097 
10098 	dm_con_state = to_dm_connector_state(connector->state);
10099 
10100 	if (!amdgpu_dm_connector->dc_sink) {
10101 		DRM_ERROR("dc_sink is NULL; cannot update FreeSync capabilities.\n");
10102 		goto update;
10103 	}
10104 	if (!adev->dm.freesync_module)
10105 		goto update;
10106 
10108 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
10109 	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10110 		bool edid_check_required = false;
10111 
10112 		if (edid) {
10113 			edid_check_required = is_dp_capable_without_timing_msa(
10114 						adev->dm.dc,
10115 						amdgpu_dm_connector);
10116 		}
10117 
10118 		if (edid_check_required && (edid->version > 1 ||
10119 		    (edid->version == 1 && edid->revision > 1))) {
10120 			for (i = 0; i < 4; i++) {
10122 				timing	= &edid->detailed_timings[i];
10123 				data	= &timing->data.other_data;
10124 				range	= &data->data.range;
10125 				/*
10126 				 * Check if monitor has continuous frequency mode
10127 				 */
10128 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10129 					continue;
10130 				/*
10131 				 * Check for range limits only: if flags == 1, the
10132 				 * descriptor provides no additional timing information.
10133 				 * Default GTF, GTF secondary curve and CVT are not
10134 				 * supported.
10135 				 */
10136 				if (range->flags != 1)
10137 					continue;
10138 
10139 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10140 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10141 				amdgpu_dm_connector->pixel_clock_mhz =
10142 					range->pixel_clock_mhz * 10;
10143 
10144 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10145 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10146 
10147 				break;
10148 			}
10149 
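			/*
			 * Require more than 10 Hz between the limits before
			 * advertising FreeSync: e.g. a 48-144 Hz range
			 * descriptor qualifies (96 Hz), while a fixed
			 * 60-60 Hz monitor does not.
			 */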
10150 			if (amdgpu_dm_connector->max_vfreq -
10151 			    amdgpu_dm_connector->min_vfreq > 10) {
10153 				freesync_capable = true;
10154 			}
10155 		}
10156 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10157 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10158 		if (i >= 0 && vsdb_info.freesync_supported) {
10159 			timing  = &edid->detailed_timings[i];
10160 			data    = &timing->data.other_data;
10161 
10162 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10163 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10164 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10165 				freesync_capable = true;
10166 
10167 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10168 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10169 		}
10170 	}
10171 
10172 update:
10173 	if (dm_con_state)
10174 		dm_con_state->freesync_capable = freesync_capable;
10175 
10176 	if (connector->vrr_capable_property)
10177 		drm_connector_set_vrr_capable_property(connector,
10178 						       freesync_capable);
10179 }
10180 
10181 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10182 {
10183 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10184 
10185 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10186 		return;
10187 	if (link->type == dc_connection_none)
10188 		return;
10189 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10190 					dpcd_data, sizeof(dpcd_data))) {
10191 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10192 
10193 		if (dpcd_data[0] == 0) {
10194 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10195 			link->psr_settings.psr_feature_enabled = false;
10196 		} else {
10197 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10198 			link->psr_settings.psr_feature_enabled = true;
10199 		}
10200 
10201 		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10202 	}
10203 }
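
/*
 * DP_PSR_SUPPORT (DPCD 0x070) values, per the DisplayPort specification:
 * 0 means PSR is not supported and 1 means PSR1; higher values advertise
 * PSR2 variants, which amdgpu_dm_set_psr_caps() above still maps to
 * DC_PSR_VERSION_1.
 */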
10204 
10205 /**
10206  * amdgpu_dm_link_setup_psr() - configure the PSR link
10207  * @stream: stream state
10208  *
10209  * Return: true on success
10210  */
10211 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10212 {
10213 	struct dc_link *link = NULL;
10214 	struct psr_config psr_config = {0};
10215 	struct psr_context psr_context = {0};
10216 	bool ret = false;
10217 
10218 	if (stream == NULL)
10219 		return false;
10220 
10221 	link = stream->link;
10222 
10223 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10224 
10225 	if (psr_config.psr_version > 0) {
10226 		psr_config.psr_exit_link_training_required = 0x1;
10227 		psr_config.psr_frame_capture_indication_req = 0;
10228 		psr_config.psr_rfb_setup_time = 0x37;
10229 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10230 		psr_config.allow_smu_optimizations = 0x0;
10231 
10232 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10233 	}
10234 
10235 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10236 
10237 	return ret;
10238 }
10239 
10240 /**
10241  * amdgpu_dm_psr_enable() - enable PSR in the display firmware
10242  * @stream: stream state
10243  *
10244  * Return: true on success
10245  */
10246 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10247 {
10248 	struct dc_link *link = stream->link;
10249 	unsigned int vsync_rate_hz = 0;
10250 	struct dc_static_screen_params params = {0};
10251 	/* Calculate the number of static frames before generating an
10252 	 * interrupt to enter PSR.
10253 	 * Initialize with a fail-safe of 2 static frames.
10254 	 */
10255 	unsigned int num_frames_static = 2;
10256 
10257 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10258 
10259 	vsync_rate_hz = div64_u64(div64_u64((
10260 			stream->timing.pix_clk_100hz * 100),
10261 			stream->timing.v_total),
10262 			stream->timing.h_total);
10263 
10264 	/*
10265 	 * Round up: calculate the number of frames such that at least
10266 	 * 30 ms of time has passed.
10267 	 */
10268 	if (vsync_rate_hz != 0) {
10269 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10270 		num_frames_static = (30000 / frame_time_microsec) + 1;
10271 	}
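	/*
	 * Example: for 1920x1080@60 (pix_clk_100hz = 1485000, v_total = 1125,
	 * h_total = 2200), vsync_rate_hz = 148500000 / 1125 / 2200 = 60,
	 * frame_time_microsec = 16666 and num_frames_static = 30000 / 16666
	 * + 1 = 2 frames, i.e. ~33 ms of static screen before PSR entry.
	 */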
10272 
10273 	params.triggers.cursor_update = true;
10274 	params.triggers.overlay_update = true;
10275 	params.triggers.surface_update = true;
10276 	params.num_frames = num_frames_static;
10277 
10278 	dc_stream_set_static_screen_params(link->ctx->dc,
10279 					   &stream, 1,
10280 					   &params);
10281 
10282 	return dc_link_set_psr_allow_active(link, true, false, false);
10283 }
10284 
10285 /**
10286  * amdgpu_dm_psr_disable() - disable PSR in the display firmware
10287  * @stream: stream state
10288  *
10289  * Return: true on success
10290  */
10291 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10292 {
10294 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10295 
10296 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10297 }
10298 
10299 /**
10300  * amdgpu_dm_psr_disable_all() - disable PSR if enabled on any stream
10301  * @dm: display manager state
10302  *
10303  * Return: true on success
10304  */
10305 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10306 {
10307 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10308 	return dc_set_psr_allow_active(dm->dc, false);
10309 }
10310 
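/**
 * amdgpu_dm_trigger_timing_sync() - re-apply forced CRTC timing sync
 * @dev: DRM device
 *
 * Under dc_lock, propagates adev->dm.force_timing_sync to every stream
 * in the current DC state, re-enables per-frame CRTC master sync and
 * triggers the sync in DC.
 */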
10311 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10312 {
10313 	struct amdgpu_device *adev = drm_to_adev(dev);
10314 	struct dc *dc = adev->dm.dc;
10315 	int i;
10316 
10317 	mutex_lock(&adev->dm.dc_lock);
10318 	if (dc->current_state) {
10319 		for (i = 0; i < dc->current_state->stream_count; ++i)
10320 			dc->current_state->streams[i]
10321 				->triggered_crtc_reset.enabled =
10322 				adev->dm.force_timing_sync;
10323 
10324 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10325 		dc_trigger_sync(dc, dc->current_state);
10326 	}
10327 	mutex_unlock(&adev->dm.dc_lock);
10328 }
10329 
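/**
 * dm_write_reg_func() - MMIO register write helper used by DC
 * @ctx: DC context, provides the CGS device handle
 * @address: register offset
 * @value: value to write
 * @func_name: caller name, part of the DC register-access callback
 *             signature (not referenced here)
 *
 * Writes the register through CGS and records the access via the
 * amdgpu_dc_wreg trace point.
 */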
10330 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10331 		       uint32_t value, const char *func_name)
10332 {
10333 #ifdef DM_CHECK_ADDR_0
10334 	if (address == 0) {
10335 		DC_ERR("invalid register write; address = 0\n");
10336 		return;
10337 	}
10338 #endif
10339 	cgs_write_register(ctx->cgs_device, address, value);
10340 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10341 }
10342 
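/**
 * dm_read_reg_func() - MMIO register read helper used by DC
 * @ctx: DC context, provides the CGS device handle
 * @address: register offset
 * @func_name: caller name, part of the DC register-access callback
 *             signature (not referenced here)
 *
 * Return: the register value, or 0 if the read is rejected (address 0
 * under DM_CHECK_ADDR_0, or a DMUB register gather in progress).
 */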
10343 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10344 			  const char *func_name)
10345 {
10346 	uint32_t value;
10347 #ifdef DM_CHECK_ADDR_0
10348 	if (address == 0) {
10349 		DC_ERR("invalid register read; address = 0\n");
10350 		return 0;
10351 	}
10352 #endif
10353 
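	/*
	 * Reads cannot be serviced while the DMUB helper is gathering
	 * register writes for offload (the writes have not reached the
	 * hardware yet), unless the sequence is flagged for burst write.
	 */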
10354 	if (ctx->dmub_srv &&
10355 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10356 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10357 		ASSERT(false);
10358 		return 0;
10359 	}
10360 
10361 	value = cgs_read_register(ctx->cgs_device, address);
10362 
10363 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10364 
10365 	return value;
10366 }
10367