1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
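/*
 * Rough layout of the DMUB firmware blob, as a sketch inferred from the
 * offset math in dm_dmub_sw_init() and dm_dmub_hw_init() below (the exact
 * layout is owned by the firmware packaging tools):
 *
 *   ucode_array_offset_bytes
 *   |
 *   v
 *   +------------+---------------------------+------------+----------+
 *   | PSP header | instruction/constant data | PSP footer | bss/data |
 *   +------------+---------------------------+------------+----------+
 *   |<-------------- inst_const_bytes ------------------>|
 *
 * PSP_HEADER_BYTES and PSP_FOOTER_BYTES are stripped off before the
 * instruction/constant region is copied into the CW0 window for the
 * backdoor (non-PSP) load path.
 */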
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
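
/*
 * Illustrative layering sketch (informal; see the DOC comment above and the
 * individual handlers in this file for the authoritative flow):
 *
 *   DRM core / userspace (atomic uAPI)
 *            |
 *            v
 *   amdgpu_dm (this file): amdgpu_dm_atomic_check(),
 *                          amdgpu_dm_atomic_commit_tail(), IRQ handlers
 *            |
 *            v
 *   DC core (dc_create(), dc_stream_*, dc_link_*)
 *            |
 *            v
 *   DMCU/DMUB firmware and display hardware
 */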
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);
142 
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the DRM structures created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static bool
219 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
220 				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
234 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
235 {
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 	else {
239 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
240 
241 		if (acrtc->dm_irq_params.stream == NULL) {
242 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
243 				  crtc);
244 			return 0;
245 		}
246 
247 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
248 	}
249 }
250 
251 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
252 				  u32 *vbl, u32 *position)
253 {
254 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
255 
256 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
257 		return -EINVAL;
258 	else {
259 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
260 
		if (acrtc->dm_irq_params.stream == NULL) {
262 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 				  crtc);
264 			return 0;
265 		}
266 
		/*
		 * TODO: rework base driver to use values directly.
		 * For now, parse it back into reg-format.
		 */
271 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 					 &v_blank_start,
273 					 &v_blank_end,
274 					 &h_position,
275 					 &v_position);
276 
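		/*
		 * Illustrative example of the packed reg-format (values made
		 * up): h_position = 0x0050 and v_position = 0x0120 yield
		 * *position = (0x0050 << 16) | 0x0120 = 0x00500120. The vbl
		 * word packs v_blank_start/v_blank_end the same way.
		 */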
277 		*position = v_position | (h_position << 16);
278 		*vbl = v_blank_start | (v_blank_end << 16);
279 	}
280 
281 	return 0;
282 }
283 
284 static bool dm_is_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return true;
288 }
289 
290 static int dm_wait_for_idle(void *handle)
291 {
292 	/* XXX todo */
293 	return 0;
294 }
295 
296 static bool dm_check_soft_reset(void *handle)
297 {
298 	return false;
299 }
300 
301 static int dm_soft_reset(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static struct amdgpu_crtc *
308 get_crtc_by_otg_inst(struct amdgpu_device *adev,
309 		     int otg_inst)
310 {
311 	struct drm_device *dev = adev_to_drm(adev);
312 	struct drm_crtc *crtc;
313 	struct amdgpu_crtc *amdgpu_crtc;
314 
315 	if (WARN_ON(otg_inst == -1))
316 		return adev->mode_info.crtcs[0];
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used to retrieve the amdgpu device and IRQ source
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	WARN_ON(!e);
396 
397 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
398 
399 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
400 	if (!vrr_active ||
401 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
402 				      &v_blank_end, &hpos, &vpos) ||
403 	    (vpos < v_blank_start)) {
404 		/* Update to correct count and vblank timestamp if racing with
405 		 * vblank irq. This also updates to the correct vblank timestamp
406 		 * even in VRR mode, as scanout is past the front-porch atm.
407 		 */
408 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
409 
410 		/* Wake up userspace by sending the pageflip event with proper
411 		 * count and timestamp of vblank of flip completion.
412 		 */
413 		if (e) {
414 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
415 
416 			/* Event sent, so done with vblank for this flip */
417 			drm_crtc_vblank_put(&amdgpu_crtc->base);
418 		}
419 	} else if (e) {
420 		/* VRR active and inside front-porch: vblank count and
421 		 * timestamp for pageflip event will only be up to date after
422 		 * drm_crtc_handle_vblank() has been executed from late vblank
423 		 * irq handler after start of back-porch (vline 0). We queue the
424 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
425 		 * updated timestamp and count, once it runs after us.
426 		 *
427 		 * We need to open-code this instead of using the helper
428 		 * drm_crtc_arm_vblank_event(), as that helper would
429 		 * call drm_crtc_accurate_vblank_count(), which we must
430 		 * not call in VRR mode while we are in front-porch!
431 		 */
432 
433 		/* sequence will be replaced by real count during send-out. */
434 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
435 		e->pipe = amdgpu_crtc->crtc_id;
436 
437 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
438 		e = NULL;
439 	}
440 
	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as it is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips while vsync + VRR is
	 * enabled.
	 */
446 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
447 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
448 
449 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
450 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
451 
452 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
453 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
454 		     vrr_active, (int) !e);
455 }
456 
457 static void dm_vupdate_high_irq(void *interrupt_params)
458 {
459 	struct common_irq_params *irq_params = interrupt_params;
460 	struct amdgpu_device *adev = irq_params->adev;
461 	struct amdgpu_crtc *acrtc;
462 	struct drm_device *drm_dev;
463 	struct drm_vblank_crtc *vblank;
464 	ktime_t frame_duration_ns, previous_timestamp;
465 	unsigned long flags;
466 	int vrr_active;
467 
468 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
469 
470 	if (acrtc) {
471 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
472 		drm_dev = acrtc->base.dev;
473 		vblank = &drm_dev->vblank[acrtc->base.index];
474 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
475 		frame_duration_ns = vblank->time - previous_timestamp;
476 
477 		if (frame_duration_ns > 0) {
478 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
479 						frame_duration_ns,
480 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
481 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
482 		}
483 
484 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
485 			      acrtc->crtc_id,
486 			      vrr_active);
487 
		/* Core vblank handling is done here after the end of the
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results then. This also delivers the page-flip
		 * completion events that were queued to us if a pageflip
		 * happened inside the front-porch.
		 */
494 		if (vrr_active) {
495 			drm_crtc_handle_vblank(&acrtc->base);
496 
497 			/* BTR processing for pre-DCE12 ASICs */
498 			if (acrtc->dm_irq_params.stream &&
499 			    adev->family < AMDGPU_FAMILY_AI) {
500 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
501 				mod_freesync_handle_v_update(
502 				    adev->dm.freesync_module,
503 				    acrtc->dm_irq_params.stream,
504 				    &acrtc->dm_irq_params.vrr_params);
505 
506 				dc_stream_adjust_vmin_vmax(
507 				    adev->dm.dc,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params.adjust);
510 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
511 			}
512 		}
513 	}
514 }
515 
516 /**
517  * dm_crtc_high_irq() - Handles CRTC interrupt
518  * @interrupt_params: used for determining the CRTC instance
519  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
521  * event handler.
522  */
523 static void dm_crtc_high_irq(void *interrupt_params)
524 {
525 	struct common_irq_params *irq_params = interrupt_params;
526 	struct amdgpu_device *adev = irq_params->adev;
527 	struct amdgpu_crtc *acrtc;
528 	unsigned long flags;
529 	int vrr_active;
530 
531 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
532 	if (!acrtc)
533 		return;
534 
535 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
536 
537 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
538 		      vrr_active, acrtc->dm_irq_params.active_planes);
539 
	/*
	 * Core vblank handling at the start of the front-porch is only
	 * possible in non-VRR mode, as only then does vblank timestamping
	 * give valid results while done inside the front-porch. Otherwise
	 * defer it to dm_vupdate_high_irq after the end of the front-porch.
	 */
546 	if (!vrr_active)
547 		drm_crtc_handle_vblank(&acrtc->base);
548 
	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
553 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
554 
555 	/* BTR updates need to happen before VUPDATE on Vega and above. */
556 	if (adev->family < AMDGPU_FAMILY_AI)
557 		return;
558 
559 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
560 
561 	if (acrtc->dm_irq_params.stream &&
562 	    acrtc->dm_irq_params.vrr_params.supported &&
563 	    acrtc->dm_irq_params.freesync_config.state ==
564 		    VRR_STATE_ACTIVE_VARIABLE) {
565 		mod_freesync_handle_v_update(adev->dm.freesync_module,
566 					     acrtc->dm_irq_params.stream,
567 					     &acrtc->dm_irq_params.vrr_params);
568 
569 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
570 					   &acrtc->dm_irq_params.vrr_params.adjust);
571 	}
572 
573 	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
575 	 * In that case, pageflip completion interrupts won't fire and pageflip
576 	 * completion events won't get delivered. Prevent this by sending
577 	 * pending pageflip events from here if a flip is still pending.
578 	 *
579 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
580 	 * avoid race conditions between flip programming and completion,
581 	 * which could cause too early flip completion events.
582 	 */
583 	if (adev->family >= AMDGPU_FAMILY_RV &&
584 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
585 	    acrtc->dm_irq_params.active_planes == 0) {
586 		if (acrtc->event) {
587 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
588 			acrtc->event = NULL;
589 			drm_crtc_vblank_put(&acrtc->base);
590 		}
591 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
592 	}
593 
594 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
595 }
596 
597 #if defined(CONFIG_DRM_AMD_DC_DCN)
598 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
 * Used to set the CRC window and read out the CRC value at the vertical line 0 position
605  */
606 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
607 {
608 	struct common_irq_params *irq_params = interrupt_params;
609 	struct amdgpu_device *adev = irq_params->adev;
610 	struct amdgpu_crtc *acrtc;
611 
612 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
613 
614 	if (!acrtc)
615 		return;
616 
617 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
618 }
619 #endif
620 
621 #define DMUB_TRACE_MAX_READ 64
622 /**
623  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
624  * @interrupt_params: used for determining the Outbox instance
625  *
 * Handles the Outbox interrupt by processing DMUB notifications and
 * draining the DMCUB trace buffer.
628  */
629 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
630 {
631 	struct dmub_notification notify;
632 	struct common_irq_params *irq_params = interrupt_params;
633 	struct amdgpu_device *adev = irq_params->adev;
634 	struct amdgpu_display_manager *dm = &adev->dm;
635 	struct dmcub_trace_buf_entry entry = { 0 };
636 	uint32_t count = 0;
637 
638 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
639 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
640 			do {
641 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
642 			} while (notify.pending_notification);
643 
644 			if (adev->dm.dmub_notify)
645 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
646 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
647 				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD implementation */
649 
650 		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
652 		}
653 	}
654 
655 
656 	do {
657 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
658 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
659 							entry.param0, entry.param1);
660 
661 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
662 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else {
			break;
		}
665 
666 		count++;
667 
668 	} while (count <= DMUB_TRACE_MAX_READ);
669 
670 	ASSERT(count <= DMUB_TRACE_MAX_READ);
671 }
672 #endif
673 
674 static int dm_set_clockgating_state(void *handle,
675 		  enum amd_clockgating_state state)
676 {
677 	return 0;
678 }
679 
680 static int dm_set_powergating_state(void *handle,
681 		  enum amd_powergating_state state)
682 {
683 	return 0;
684 }
685 
686 /* Prototypes of private functions */
static int dm_early_init(void *handle);
688 
/* Allocate memory for FBC compressed data */
690 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
691 {
692 	struct drm_device *dev = connector->dev;
693 	struct amdgpu_device *adev = drm_to_adev(dev);
694 	struct dm_compressor_info *compressor = &adev->dm.compressor;
695 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
696 	struct drm_display_mode *mode;
697 	unsigned long max_size = 0;
698 
699 	if (adev->dm.dc->fbc_compressor == NULL)
700 		return;
701 
702 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
703 		return;
704 
705 	if (compressor->bo_ptr)
706 		return;
707 
708 
709 	list_for_each_entry(mode, &connector->modes, head) {
710 		if (max_size < mode->htotal * mode->vtotal)
711 			max_size = mode->htotal * mode->vtotal;
712 	}
713 
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
729 
730 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
731 					  int pipe, bool *enabled,
732 					  unsigned char *buf, int max_bytes)
733 {
734 	struct drm_device *dev = dev_get_drvdata(kdev);
735 	struct amdgpu_device *adev = drm_to_adev(dev);
736 	struct drm_connector *connector;
737 	struct drm_connector_list_iter conn_iter;
738 	struct amdgpu_dm_connector *aconnector;
739 	int ret = 0;
740 
741 	*enabled = false;
742 
743 	mutex_lock(&adev->dm.audio_lock);
744 
745 	drm_connector_list_iter_begin(dev, &conn_iter);
746 	drm_for_each_connector_iter(connector, &conn_iter) {
747 		aconnector = to_amdgpu_dm_connector(connector);
748 		if (aconnector->audio_inst != port)
749 			continue;
750 
751 		*enabled = true;
752 		ret = drm_eld_size(connector->eld);
753 		memcpy(buf, connector->eld, min(max_bytes, ret));
754 
755 		break;
756 	}
757 	drm_connector_list_iter_end(&conn_iter);
758 
759 	mutex_unlock(&adev->dm.audio_lock);
760 
761 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
762 
763 	return ret;
764 }
765 
766 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
767 	.get_eld = amdgpu_dm_audio_component_get_eld,
768 };
769 
770 static int amdgpu_dm_audio_component_bind(struct device *kdev,
771 				       struct device *hda_kdev, void *data)
772 {
773 	struct drm_device *dev = dev_get_drvdata(kdev);
774 	struct amdgpu_device *adev = drm_to_adev(dev);
775 	struct drm_audio_component *acomp = data;
776 
777 	acomp->ops = &amdgpu_dm_audio_component_ops;
778 	acomp->dev = kdev;
779 	adev->dm.audio_component = acomp;
780 
781 	return 0;
782 }
783 
784 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
785 					  struct device *hda_kdev, void *data)
786 {
787 	struct drm_device *dev = dev_get_drvdata(kdev);
788 	struct amdgpu_device *adev = drm_to_adev(dev);
789 	struct drm_audio_component *acomp = data;
790 
791 	acomp->ops = NULL;
792 	acomp->dev = NULL;
793 	adev->dm.audio_component = NULL;
794 }
795 
796 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
797 	.bind	= amdgpu_dm_audio_component_bind,
798 	.unbind	= amdgpu_dm_audio_component_unbind,
799 };
800 
801 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
802 {
803 	int i, ret;
804 
805 	if (!amdgpu_audio)
806 		return 0;
807 
808 	adev->mode_info.audio.enabled = true;
809 
810 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
811 
812 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
813 		adev->mode_info.audio.pin[i].channels = -1;
814 		adev->mode_info.audio.pin[i].rate = -1;
815 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
816 		adev->mode_info.audio.pin[i].status_bits = 0;
817 		adev->mode_info.audio.pin[i].category_code = 0;
818 		adev->mode_info.audio.pin[i].connected = false;
819 		adev->mode_info.audio.pin[i].id =
820 			adev->dm.dc->res_pool->audios[i]->inst;
821 		adev->mode_info.audio.pin[i].offset = 0;
822 	}
823 
824 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
825 	if (ret < 0)
826 		return ret;
827 
828 	adev->dm.audio_registered = true;
829 
830 	return 0;
831 }
832 
833 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
834 {
835 	if (!amdgpu_audio)
836 		return;
837 
838 	if (!adev->mode_info.audio.enabled)
839 		return;
840 
841 	if (adev->dm.audio_registered) {
842 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
843 		adev->dm.audio_registered = false;
844 	}
845 
846 	/* TODO: Disable audio? */
847 
848 	adev->mode_info.audio.enabled = false;
849 }
850 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
852 {
853 	struct drm_audio_component *acomp = adev->dm.audio_component;
854 
855 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
856 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
857 
858 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
859 						 pin, -1);
860 	}
861 }
862 
863 static int dm_dmub_hw_init(struct amdgpu_device *adev)
864 {
865 	const struct dmcub_firmware_header_v1_0 *hdr;
866 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
867 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
868 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
869 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
870 	struct abm *abm = adev->dm.dc->res_pool->abm;
871 	struct dmub_srv_hw_params hw_params;
872 	enum dmub_status status;
873 	const unsigned char *fw_inst_const, *fw_bss_data;
874 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
875 	bool has_hw_support;
876 
877 	if (!dmub_srv)
878 		/* DMUB isn't supported on the ASIC. */
879 		return 0;
880 
881 	if (!fb_info) {
882 		DRM_ERROR("No framebuffer info for DMUB service.\n");
883 		return -EINVAL;
884 	}
885 
886 	if (!dmub_fw) {
887 		/* Firmware required for DMUB support. */
888 		DRM_ERROR("No firmware provided for DMUB.\n");
889 		return -EINVAL;
890 	}
891 
892 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
893 	if (status != DMUB_STATUS_OK) {
894 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
895 		return -EINVAL;
896 	}
897 
898 	if (!has_hw_support) {
899 		DRM_INFO("DMUB unsupported on ASIC\n");
900 		return 0;
901 	}
902 
903 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
904 
905 	fw_inst_const = dmub_fw->data +
906 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
907 			PSP_HEADER_BYTES;
908 
909 	fw_bss_data = dmub_fw->data +
910 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
911 		      le32_to_cpu(hdr->inst_const_bytes);
912 
913 	/* Copy firmware and bios info into FB memory. */
914 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
915 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
916 
917 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
918 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
	 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
	 * will be done by dm_dmub_hw_init.
	 */
924 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
925 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
926 				fw_inst_const_size);
927 	}
928 
929 	if (fw_bss_data_size)
930 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
931 		       fw_bss_data, fw_bss_data_size);
932 
933 	/* Copy firmware bios info into FB memory. */
934 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
935 	       adev->bios_size);
936 
937 	/* Reset regions that need to be reset. */
938 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
940 
941 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
942 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
943 
944 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
945 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
946 
947 	/* Initialize hardware. */
948 	memset(&hw_params, 0, sizeof(hw_params));
949 	hw_params.fb_base = adev->gmc.fb_start;
950 	hw_params.fb_offset = adev->gmc.aper_base;
951 
952 	/* backdoor load firmware and trigger dmub running */
953 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
954 		hw_params.load_inst_const = true;
955 
956 	if (dmcu)
957 		hw_params.psp_version = dmcu->psp_version;
958 
959 	for (i = 0; i < fb_info->num_fb; ++i)
960 		hw_params.fb[i] = &fb_info->fb[i];
961 
962 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
963 	if (status != DMUB_STATUS_OK) {
964 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
965 		return -EINVAL;
966 	}
967 
968 	/* Wait for firmware load to finish. */
969 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
970 	if (status != DMUB_STATUS_OK)
971 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
972 
973 	/* Init DMCU and ABM if available. */
974 	if (dmcu && abm) {
975 		dmcu->funcs->dmcu_init(dmcu);
976 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
977 	}
978 
979 	if (!adev->dm.dc->ctx->dmub_srv)
980 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
981 	if (!adev->dm.dc->ctx->dmub_srv) {
982 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
983 		return -ENOMEM;
984 	}
985 
986 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
987 		 adev->dm.dmcub_fw_version);
988 
989 	return 0;
990 }
991 
992 #if defined(CONFIG_DRM_AMD_DC_DCN)
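/*
 * Note on the unit conversions in mmhub_read_system_context() below (a
 * reading aid inferred from the shifts used there, not from the register
 * specs): system aperture addresses are programmed in 256KB units (>> 18),
 * the AGP base/bot/top values in 16MB units (>> 24), and the GART page
 * table addresses in 4KB pages (>> 12), with the high parts holding bits
 * 47:44 of the 48-bit address.
 */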
993 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
994 {
995 	uint64_t pt_base;
996 	uint32_t logical_addr_low;
997 	uint32_t logical_addr_high;
998 	uint32_t agp_base, agp_bot, agp_top;
999 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1000 
1001 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1002 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1003 
1004 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that makes it unable to use VRAM
		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * raise the system aperture high address (add 1) to get rid
		 * of the VM fault and hardware hang.
		 */
1011 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1012 	else
1013 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1014 
1015 	agp_base = 0;
1016 	agp_bot = adev->gmc.agp_start >> 24;
1017 	agp_top = adev->gmc.agp_end >> 24;
1018 
1019 
1020 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1021 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1022 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1023 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1024 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1025 	page_table_base.low_part = lower_32_bits(pt_base);
1026 
1027 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1028 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1029 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1031 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1032 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1033 
1034 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1035 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1036 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1037 
1038 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1039 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1040 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1041 
1042 	pa_config->is_hvm_enabled = 0;
1043 
1044 }
1045 #endif
1046 #if defined(CONFIG_DRM_AMD_DC_DCN)
1047 static void vblank_control_worker(struct work_struct *work)
1048 {
1049 	struct vblank_control_work *vblank_work =
1050 		container_of(work, struct vblank_control_work, work);
1051 	struct amdgpu_display_manager *dm = vblank_work->dm;
1052 
1053 	mutex_lock(&dm->dc_lock);
1054 
1055 	if (vblank_work->enable)
1056 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1058 		dm->active_vblank_irq_count--;
1059 
1060 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1061 
1062 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1063 
1064 	/* Control PSR based on vblank requirements from OS */
1065 	if (vblank_work->stream && vblank_work->stream->link) {
1066 		if (vblank_work->enable) {
1067 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1068 				amdgpu_dm_psr_disable(vblank_work->stream);
1069 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1070 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1071 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1072 			amdgpu_dm_psr_enable(vblank_work->stream);
1073 		}
1074 	}
1075 
1076 	mutex_unlock(&dm->dc_lock);
1077 
1078 	dc_stream_release(vblank_work->stream);
1079 
1080 	kfree(vblank_work);
1081 }
1082 
1083 #endif
1084 static int amdgpu_dm_init(struct amdgpu_device *adev)
1085 {
1086 	struct dc_init_data init_data;
1087 #ifdef CONFIG_DRM_AMD_DC_HDCP
1088 	struct dc_callback_init init_params;
1089 #endif
1090 	int r;
1091 
1092 	adev->dm.ddev = adev_to_drm(adev);
1093 	adev->dm.adev = adev;
1094 
1095 	/* Zero all the fields */
1096 	memset(&init_data, 0, sizeof(init_data));
1097 #ifdef CONFIG_DRM_AMD_DC_HDCP
1098 	memset(&init_params, 0, sizeof(init_params));
1099 #endif
1100 
1101 	mutex_init(&adev->dm.dc_lock);
1102 	mutex_init(&adev->dm.audio_lock);
1103 #if defined(CONFIG_DRM_AMD_DC_DCN)
1104 	spin_lock_init(&adev->dm.vblank_lock);
1105 #endif
1106 
	if (amdgpu_dm_irq_init(adev)) {
1108 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1109 		goto error;
1110 	}
1111 
1112 	init_data.asic_id.chip_family = adev->family;
1113 
1114 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1115 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1116 
1117 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1118 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1119 	init_data.asic_id.atombios_base_address =
1120 		adev->mode_info.atom_context->bios;
1121 
1122 	init_data.driver = adev;
1123 
1124 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1125 
1126 	if (!adev->dm.cgs_device) {
1127 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1128 		goto error;
1129 	}
1130 
1131 	init_data.cgs_device = adev->dm.cgs_device;
1132 
1133 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1134 
1135 	switch (adev->asic_type) {
1136 	case CHIP_CARRIZO:
1137 	case CHIP_STONEY:
1138 	case CHIP_RAVEN:
1139 	case CHIP_RENOIR:
1140 		init_data.flags.gpu_vm_support = true;
1141 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1142 			init_data.flags.disable_dmcu = true;
1143 		break;
1144 	case CHIP_VANGOGH:
1145 	case CHIP_YELLOW_CARP:
1146 		init_data.flags.gpu_vm_support = true;
1147 		break;
1148 	default:
1149 		break;
1150 	}
1151 
1152 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1153 		init_data.flags.fbc_support = true;
1154 
1155 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1156 		init_data.flags.multi_mon_pp_mclk_switch = true;
1157 
1158 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1159 		init_data.flags.disable_fractional_pwm = true;
1160 
1161 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1162 		init_data.flags.edp_no_power_sequencing = true;
1163 
1164 	init_data.flags.power_down_display_on_boot = true;
1165 
1166 	INIT_LIST_HEAD(&adev->dm.da_list);
1167 	/* Display Core create. */
1168 	adev->dm.dc = dc_create(&init_data);
1169 
1170 	if (adev->dm.dc) {
1171 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1172 	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1174 		goto error;
1175 	}
1176 
1177 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1178 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1179 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1180 	}
1181 
1182 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1184 
1185 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1186 		adev->dm.dc->debug.disable_stutter = true;
1187 
1188 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1189 		adev->dm.dc->debug.disable_dsc = true;
1190 
1191 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1192 		adev->dm.dc->debug.disable_clock_gate = true;
1193 
1194 	r = dm_dmub_hw_init(adev);
1195 	if (r) {
1196 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1197 		goto error;
1198 	}
1199 
1200 	dc_hardware_init(adev->dm.dc);
1201 
1202 #if defined(CONFIG_DRM_AMD_DC_DCN)
1203 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1204 		struct dc_phy_addr_space_config pa_config;
1205 
1206 		mmhub_read_system_context(adev, &pa_config);
1207 
1208 		// Call the DC init_memory func
1209 		dc_setup_system_context(adev->dm.dc, &pa_config);
1210 	}
1211 #endif
1212 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1220 
1221 	amdgpu_dm_init_color_mod();
1222 
1223 #if defined(CONFIG_DRM_AMD_DC_DCN)
1224 	if (adev->dm.dc->caps.max_links > 0) {
1225 		adev->dm.vblank_control_workqueue =
1226 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1227 		if (!adev->dm.vblank_control_workqueue)
1228 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1229 	}
1230 #endif
1231 
1232 #ifdef CONFIG_DRM_AMD_DC_HDCP
1233 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1234 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1235 
1236 		if (!adev->dm.hdcp_workqueue)
1237 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1238 		else
1239 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1240 
1241 		dc_init_callbacks(adev->dm.dc, &init_params);
1242 	}
1243 #endif
1244 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1245 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1246 #endif
1247 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1248 		init_completion(&adev->dm.dmub_aux_transfer_done);
1249 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1250 		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1252 			goto error;
1253 		}
1254 		amdgpu_dm_outbox_init(adev);
1255 	}
1256 
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1262 
1263 	/* create fake encoders for MST */
1264 	dm_dp_create_fake_mst_encoders(adev);
1265 
1266 	/* TODO: Add_display_info? */
1267 
1268 	/* TODO use dynamic cursor width */
1269 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1270 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1271 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1277 
1278 
1279 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1280 
1281 	return 0;
1282 error:
1283 	amdgpu_dm_fini(adev);
1284 
1285 	return -EINVAL;
1286 }
1287 
1288 static int amdgpu_dm_early_fini(void *handle)
1289 {
1290 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1291 
1292 	amdgpu_dm_audio_fini(adev);
1293 
1294 	return 0;
1295 }
1296 
1297 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1298 {
1299 	int i;
1300 
1301 #if defined(CONFIG_DRM_AMD_DC_DCN)
1302 	if (adev->dm.vblank_control_workqueue) {
1303 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1304 		adev->dm.vblank_control_workqueue = NULL;
1305 	}
1306 #endif
1307 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1311 
1312 	amdgpu_dm_destroy_drm_device(&adev->dm);
1313 
1314 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1315 	if (adev->dm.crc_rd_wrk) {
1316 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1317 		kfree(adev->dm.crc_rd_wrk);
1318 		adev->dm.crc_rd_wrk = NULL;
1319 	}
1320 #endif
1321 #ifdef CONFIG_DRM_AMD_DC_HDCP
1322 	if (adev->dm.hdcp_workqueue) {
1323 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1324 		adev->dm.hdcp_workqueue = NULL;
1325 	}
1326 
1327 	if (adev->dm.dc)
1328 		dc_deinit_callbacks(adev->dm.dc);
1329 #endif
1330 
1331 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1332 
1333 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1334 		kfree(adev->dm.dmub_notify);
1335 		adev->dm.dmub_notify = NULL;
1336 	}
1337 
1338 	if (adev->dm.dmub_bo)
1339 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1340 				      &adev->dm.dmub_bo_gpu_addr,
1341 				      &adev->dm.dmub_bo_cpu_addr);
1342 
1343 	/* DC Destroy TODO: Replace destroy DAL */
1344 	if (adev->dm.dc)
1345 		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */
1351 
1352 	if (adev->dm.cgs_device) {
1353 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1354 		adev->dm.cgs_device = NULL;
1355 	}
1356 	if (adev->dm.freesync_module) {
1357 		mod_freesync_destroy(adev->dm.freesync_module);
1358 		adev->dm.freesync_module = NULL;
1359 	}
1360 
	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
1366 
1367 static int load_dmcu_fw(struct amdgpu_device *adev)
1368 {
1369 	const char *fw_name_dmcu = NULL;
1370 	int r;
1371 	const struct dmcu_firmware_header_v1_0 *hdr;
1372 
	switch (adev->asic_type) {
1374 #if defined(CONFIG_DRM_AMD_DC_SI)
1375 	case CHIP_TAHITI:
1376 	case CHIP_PITCAIRN:
1377 	case CHIP_VERDE:
1378 	case CHIP_OLAND:
1379 #endif
1380 	case CHIP_BONAIRE:
1381 	case CHIP_HAWAII:
1382 	case CHIP_KAVERI:
1383 	case CHIP_KABINI:
1384 	case CHIP_MULLINS:
1385 	case CHIP_TONGA:
1386 	case CHIP_FIJI:
1387 	case CHIP_CARRIZO:
1388 	case CHIP_STONEY:
1389 	case CHIP_POLARIS11:
1390 	case CHIP_POLARIS10:
1391 	case CHIP_POLARIS12:
1392 	case CHIP_VEGAM:
1393 	case CHIP_VEGA10:
1394 	case CHIP_VEGA12:
1395 	case CHIP_VEGA20:
1396 	case CHIP_NAVI10:
1397 	case CHIP_NAVI14:
1398 	case CHIP_RENOIR:
1399 	case CHIP_SIENNA_CICHLID:
1400 	case CHIP_NAVY_FLOUNDER:
1401 	case CHIP_DIMGREY_CAVEFISH:
1402 	case CHIP_BEIGE_GOBY:
1403 	case CHIP_VANGOGH:
1404 	case CHIP_YELLOW_CARP:
1405 		return 0;
1406 	case CHIP_NAVI12:
1407 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1408 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1417 	default:
1418 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1419 		return -EINVAL;
1420 	}
1421 
1422 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1423 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1424 		return 0;
1425 	}
1426 
1427 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1428 	if (r == -ENOENT) {
1429 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1430 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1431 		adev->dm.fw_dmcu = NULL;
1432 		return 0;
1433 	}
1434 	if (r) {
1435 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1436 			fw_name_dmcu);
1437 		return r;
1438 	}
1439 
1440 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1441 	if (r) {
1442 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1443 			fw_name_dmcu);
1444 		release_firmware(adev->dm.fw_dmcu);
1445 		adev->dm.fw_dmcu = NULL;
1446 		return r;
1447 	}
1448 
1449 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1450 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1451 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1452 	adev->firmware.fw_size +=
1453 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1454 
1455 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1456 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1457 	adev->firmware.fw_size +=
1458 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1459 
1460 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1461 
1462 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1463 
1464 	return 0;
1465 }
1466 
1467 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1468 {
1469 	struct amdgpu_device *adev = ctx;
1470 
1471 	return dm_read_reg(adev->dm.dc->ctx, address);
1472 }
1473 
1474 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1475 				     uint32_t value)
1476 {
1477 	struct amdgpu_device *adev = ctx;
1478 
1479 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1480 }
1481 
1482 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1483 {
1484 	struct dmub_srv_create_params create_params;
1485 	struct dmub_srv_region_params region_params;
1486 	struct dmub_srv_region_info region_info;
1487 	struct dmub_srv_fb_params fb_params;
1488 	struct dmub_srv_fb_info *fb_info;
1489 	struct dmub_srv *dmub_srv;
1490 	const struct dmcub_firmware_header_v1_0 *hdr;
1491 	const char *fw_name_dmub;
1492 	enum dmub_asic dmub_asic;
1493 	enum dmub_status status;
1494 	int r;
1495 
1496 	switch (adev->asic_type) {
1497 	case CHIP_RENOIR:
1498 		dmub_asic = DMUB_ASIC_DCN21;
1499 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1500 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1501 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1502 		break;
1503 	case CHIP_SIENNA_CICHLID:
1504 		dmub_asic = DMUB_ASIC_DCN30;
1505 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1506 		break;
1507 	case CHIP_NAVY_FLOUNDER:
1508 		dmub_asic = DMUB_ASIC_DCN30;
1509 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1510 		break;
1511 	case CHIP_VANGOGH:
1512 		dmub_asic = DMUB_ASIC_DCN301;
1513 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1514 		break;
1515 	case CHIP_DIMGREY_CAVEFISH:
1516 		dmub_asic = DMUB_ASIC_DCN302;
1517 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1518 		break;
1519 	case CHIP_BEIGE_GOBY:
1520 		dmub_asic = DMUB_ASIC_DCN303;
1521 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1522 		break;
1523 	case CHIP_YELLOW_CARP:
1524 		dmub_asic = DMUB_ASIC_DCN31;
1525 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1526 		break;
1527 
1528 	default:
1529 		/* ASIC doesn't support DMUB. */
1530 		return 0;
1531 	}
1532 
1533 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1534 	if (r) {
1535 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1536 		return 0;
1537 	}
1538 
1539 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1540 	if (r) {
1541 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1542 		return 0;
1543 	}
1544 
1545 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1546 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1547 
1548 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1549 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1550 			AMDGPU_UCODE_ID_DMCUB;
1551 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1552 			adev->dm.dmub_fw;
1553 		adev->firmware.fw_size +=
1554 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1555 
1556 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1557 			 adev->dm.dmcub_fw_version);
1558 	}
1559 
1560 
1561 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1562 	dmub_srv = adev->dm.dmub_srv;
1563 
1564 	if (!dmub_srv) {
1565 		DRM_ERROR("Failed to allocate DMUB service!\n");
1566 		return -ENOMEM;
1567 	}
1568 
1569 	memset(&create_params, 0, sizeof(create_params));
1570 	create_params.user_ctx = adev;
1571 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1572 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1573 	create_params.asic = dmub_asic;
1574 
1575 	/* Create the DMUB service. */
1576 	status = dmub_srv_create(dmub_srv, &create_params);
1577 	if (status != DMUB_STATUS_OK) {
1578 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1579 		return -EINVAL;
1580 	}
1581 
1582 	/* Calculate the size of all the regions for the DMUB service. */
1583 	memset(&region_params, 0, sizeof(region_params));
1584 
1585 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1586 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1587 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1588 	region_params.vbios_size = adev->bios_size;
1589 	region_params.fw_bss_data = region_params.bss_data_size ?
1590 		adev->dm.dmub_fw->data +
1591 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1592 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1593 	region_params.fw_inst_const =
1594 		adev->dm.dmub_fw->data +
1595 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1596 		PSP_HEADER_BYTES;
1597 
1598 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1599 					   &region_info);
1600 
1601 	if (status != DMUB_STATUS_OK) {
1602 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1603 		return -EINVAL;
1604 	}
1605 
1606 	/*
1607 	 * Allocate a framebuffer based on the total size of all the regions.
1608 	 * TODO: Move this into GART.
1609 	 */
1610 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1611 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1612 				    &adev->dm.dmub_bo_gpu_addr,
1613 				    &adev->dm.dmub_bo_cpu_addr);
1614 	if (r)
1615 		return r;
1616 
1617 	/* Rebase the regions on the framebuffer address. */
1618 	memset(&fb_params, 0, sizeof(fb_params));
1619 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1620 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1621 	fb_params.region_info = &region_info;
1622 
1623 	adev->dm.dmub_fb_info =
1624 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1625 	fb_info = adev->dm.dmub_fb_info;
1626 
	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}
1632 
1633 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1634 	if (status != DMUB_STATUS_OK) {
1635 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1636 		return -EINVAL;
1637 	}
1638 
1639 	return 0;
1640 }
1641 
1642 static int dm_sw_init(void *handle)
1643 {
1644 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1645 	int r;
1646 
1647 	r = dm_dmub_sw_init(adev);
1648 	if (r)
1649 		return r;
1650 
1651 	return load_dmcu_fw(adev);
1652 }
1653 
1654 static int dm_sw_fini(void *handle)
1655 {
1656 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 
1658 	kfree(adev->dm.dmub_fb_info);
1659 	adev->dm.dmub_fb_info = NULL;
1660 
1661 	if (adev->dm.dmub_srv) {
1662 		dmub_srv_destroy(adev->dm.dmub_srv);
1663 		adev->dm.dmub_srv = NULL;
1664 	}
1665 
1666 	release_firmware(adev->dm.dmub_fw);
1667 	adev->dm.dmub_fw = NULL;
1668 
1669 	release_firmware(adev->dm.fw_dmcu);
1670 	adev->dm.fw_dmcu = NULL;
1671 
1672 	return 0;
1673 }
1674 
1675 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1676 {
1677 	struct amdgpu_dm_connector *aconnector;
1678 	struct drm_connector *connector;
1679 	struct drm_connector_list_iter iter;
1680 	int ret = 0;
1681 
1682 	drm_connector_list_iter_begin(dev, &iter);
1683 	drm_for_each_connector_iter(connector, &iter) {
1684 		aconnector = to_amdgpu_dm_connector(connector);
1685 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1686 		    aconnector->mst_mgr.aux) {
1687 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1688 					 aconnector,
1689 					 aconnector->base.base.id);
1690 
1691 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1692 			if (ret < 0) {
1693 				DRM_ERROR("DM_MST: Failed to start MST\n");
1694 				aconnector->dc_link->type =
1695 					dc_connection_single;
1696 				break;
1697 			}
1698 		}
1699 	}
1700 	drm_connector_list_iter_end(&iter);
1701 
1702 	return ret;
1703 }
1704 
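/*
 * dm_late_init() - Program ABM parameters and detect MST links
 *
 * Program the ABM (Adaptive Backlight Management) parameters, either through
 * the DMCU IRAM or, when ABM runs on DMCUB, through the config of each eDP
 * link, then start MST detection on all connectors.
 */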
1705 static int dm_late_init(void *handle)
1706 {
1707 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1715 
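	/* Build a 16-point linear backlight LUT spanning 0..0xFFFF. */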
1716 	for (i = 0; i < 16; i++)
1717 		linear_lut[i] = 0xFFFF * i / 15;
1718 
1719 	params.set = 0;
1720 	params.backlight_ramping_start = 0xCCCC;
1721 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1722 	params.backlight_lut_array_size = 16;
1723 	params.backlight_lut_array = linear_lut;
1724 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1728 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
1733 	if (dmcu) {
1734 		if (!dmcu_load_iram(dmcu, params))
1735 			return -EINVAL;
1736 	} else if (adev->dm.dc->ctx->dmub_srv) {
1737 		struct dc_link *edp_links[MAX_NUM_EDP];
1738 		int edp_num;
1739 
1740 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1741 		for (i = 0; i < edp_num; i++) {
1742 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1743 				return -EINVAL;
1744 		}
1745 	}
1746 
1747 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1748 }
1749 
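/*
 * Suspend or resume the MST topology managers around S3. If a topology fails
 * to resume, MST is torn down on that link and a hotplug event is sent so
 * userspace re-probes the connector.
 */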
1750 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1751 {
1752 	struct amdgpu_dm_connector *aconnector;
1753 	struct drm_connector *connector;
1754 	struct drm_connector_list_iter iter;
1755 	struct drm_dp_mst_topology_mgr *mgr;
1756 	int ret;
1757 	bool need_hotplug = false;
1758 
1759 	drm_connector_list_iter_begin(dev, &iter);
1760 	drm_for_each_connector_iter(connector, &iter) {
1761 		aconnector = to_amdgpu_dm_connector(connector);
1762 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1763 		    aconnector->mst_port)
1764 			continue;
1765 
1766 		mgr = &aconnector->mst_mgr;
1767 
1768 		if (suspend) {
1769 			drm_dp_mst_topology_mgr_suspend(mgr);
1770 		} else {
1771 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1772 			if (ret < 0) {
1773 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1774 				need_hotplug = true;
1775 			}
1776 		}
1777 	}
1778 	drm_connector_list_iter_end(&iter);
1779 
1780 	if (need_hotplug)
1781 		drm_kms_helper_hotplug_event(dev);
1782 }
1783 
1784 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1785 {
1786 	struct smu_context *smu = &adev->smu;
1787 	int ret = 0;
1788 
1789 	if (!is_support_sw_smu(adev))
1790 		return 0;
1791 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver DC implementation.
	 * For Navi1x, the clock settings of the DCN watermarks are fixed. The
	 * settings should be passed to SMU during boot up and resume from S3.
	 * Boot up: DC calculates the DCN watermark clock settings within
	 * dc_create and dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to SMU:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the DCN watermarks are also fixed
	 * values. DC has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1823 	case CHIP_NAVI10:
1824 	case CHIP_NAVI14:
1825 	case CHIP_NAVI12:
1826 		break;
1827 	default:
1828 		return 0;
1829 	}
1830 
1831 	ret = smu_write_watermarks_table(smu);
1832 	if (ret) {
1833 		DRM_ERROR("Failed to update WMTABLE!\n");
1834 		return ret;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
1840 /**
1841  * dm_hw_init() - Initialize DC device
1842  * @handle: The base driver device containing the amdgpu_dm device.
1843  *
1844  * Initialize the &struct amdgpu_display_manager device. This involves calling
1845  * the initializers of each DM component, then populating the struct with them.
1846  *
1847  * Although the function implies hardware initialization, both hardware and
1848  * software are initialized here. Splitting them out to their relevant init
1849  * hooks is a future TODO item.
1850  *
1851  * Some notable things that are initialized here:
1852  *
1853  * - Display Core, both software and hardware
1854  * - DC modules that we need (freesync and color management)
1855  * - DRM software states
1856  * - Interrupt sources and handlers
1857  * - Vblank support
1858  * - Debug FS entries, if enabled
1859  */
1860 static int dm_hw_init(void *handle)
1861 {
1862 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1863 	/* Create DAL display manager */
1864 	amdgpu_dm_init(adev);
1865 	amdgpu_dm_hpd_init(adev);
1866 
1867 	return 0;
1868 }
1869 
1870 /**
1871  * dm_hw_fini() - Teardown DC device
1872  * @handle: The base driver device containing the amdgpu_dm device.
1873  *
1874  * Teardown components within &struct amdgpu_display_manager that require
1875  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1876  * were loaded. Also flush IRQ workqueues and disable them.
1877  */
1878 static int dm_hw_fini(void *handle)
1879 {
1880 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1881 
1882 	amdgpu_dm_hpd_fini(adev);
1883 
1884 	amdgpu_dm_irq_fini(adev);
1885 	amdgpu_dm_fini(adev);
1886 	return 0;
1887 }
1888 
1889 
1890 static int dm_enable_vblank(struct drm_crtc *crtc);
1891 static void dm_disable_vblank(struct drm_crtc *crtc);
1892 
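/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * @state that still has planes; used to quiesce display interrupts around
 * GPU reset.
 */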
1893 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1894 				 struct dc_state *state, bool enable)
1895 {
1896 	enum dc_irq_source irq_source;
1897 	struct amdgpu_crtc *acrtc;
1898 	int rc = -EBUSY;
1899 	int i = 0;
1900 
1901 	for (i = 0; i < state->stream_count; i++) {
1902 		acrtc = get_crtc_by_otg_inst(
1903 				adev, state->stream_status[i].primary_otg_inst);
1904 
1905 		if (acrtc && state->stream_status[i].plane_count != 0) {
1906 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1907 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1910 			if (rc)
1911 				DRM_WARN("Failed to %s pflip interrupts\n",
1912 					 enable ? "enable" : "disable");
1913 
1914 			if (enable) {
1915 				rc = dm_enable_vblank(&acrtc->base);
1916 				if (rc)
1917 					DRM_WARN("Failed to enable vblank interrupts\n");
1918 			} else {
1919 				dm_disable_vblank(&acrtc->base);
1920 			}
1922 		}
1923 	}
1925 }
1926 
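/*
 * Commit a copy of the current DC state with all planes and streams removed,
 * bringing the hardware down to zero active streams (used when suspending
 * for GPU reset).
 */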
1927 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1928 {
1929 	struct dc_state *context = NULL;
1930 	enum dc_status res = DC_ERROR_UNEXPECTED;
1931 	int i;
1932 	struct dc_stream_state *del_streams[MAX_PIPES];
1933 	int del_streams_count = 0;
1934 
1935 	memset(del_streams, 0, sizeof(del_streams));
1936 
1937 	context = dc_create_state(dc);
1938 	if (context == NULL)
1939 		goto context_alloc_fail;
1940 
1941 	dc_resource_state_copy_construct_current(dc, context);
1942 
1943 	/* First remove from context all streams */
1944 	for (i = 0; i < context->stream_count; i++) {
1945 		struct dc_stream_state *stream = context->streams[i];
1946 
1947 		del_streams[del_streams_count++] = stream;
1948 	}
1949 
1950 	/* Remove all planes for removed streams and then remove the streams */
1951 	for (i = 0; i < del_streams_count; i++) {
1952 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1953 			res = DC_FAIL_DETACH_SURFACES;
1954 			goto fail;
1955 		}
1956 
1957 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1958 		if (res != DC_OK)
1959 			goto fail;
1960 	}
1961 
1963 	res = dc_validate_global_state(dc, context, false);
1964 
1965 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n",
			  __func__, res);
1967 		goto fail;
1968 	}
1969 
1970 	res = dc_commit_state(dc, context);
1971 
1972 fail:
1973 	dc_release_state(context);
1974 
1975 context_alloc_fail:
1976 	return res;
1977 }
1978 
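/*
 * Two suspend paths: during GPU reset the current DC state is cached and
 * display interrupts are quiesced with dm->dc_lock held (released again in
 * dm_resume()); for regular S3 the atomic state is cached instead and DC is
 * put into D3.
 */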
1979 static int dm_suspend(void *handle)
1980 {
1981 	struct amdgpu_device *adev = handle;
1982 	struct amdgpu_display_manager *dm = &adev->dm;
1983 	int ret = 0;
1984 
1985 	if (amdgpu_in_reset(adev)) {
1986 		mutex_lock(&dm->dc_lock);
1987 
1988 #if defined(CONFIG_DRM_AMD_DC_DCN)
1989 		dc_allow_idle_optimizations(adev->dm.dc, false);
1990 #endif
1991 
1992 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1993 
1994 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1995 
1996 		amdgpu_dm_commit_zero_streams(dm->dc);
1997 
1998 		amdgpu_dm_irq_suspend(adev);
1999 
2000 		return ret;
2001 	}
2002 
2003 	WARN_ON(adev->dm.cached_state);
2004 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2005 
2006 	s3_handle_mst(adev_to_drm(adev), true);
2007 
2008 	amdgpu_dm_irq_suspend(adev);
2009 
2010 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2011 
2012 	return 0;
2013 }
2014 
2015 static struct amdgpu_dm_connector *
2016 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2017 					     struct drm_crtc *crtc)
2018 {
2019 	uint32_t i;
2020 	struct drm_connector_state *new_con_state;
2021 	struct drm_connector *connector;
2022 	struct drm_crtc *crtc_from_state;
2023 
2024 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2025 		crtc_from_state = new_con_state->crtc;
2026 
2027 		if (crtc_from_state == crtc)
2028 			return to_amdgpu_dm_connector(connector);
2029 	}
2030 
2031 	return NULL;
2032 }
2033 
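/*
 * Fake a sink for a link on which real detection found nothing but whose
 * connector is forced on: create a sink matching the connector signal and
 * try to read the EDID so a mode can still be set.
 */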
2034 static void emulated_link_detect(struct dc_link *link)
2035 {
2036 	struct dc_sink_init_data sink_init_data = { 0 };
2037 	struct display_sink_capability sink_caps = { 0 };
2038 	enum dc_edid_status edid_status;
2039 	struct dc_context *dc_ctx = link->ctx;
2040 	struct dc_sink *sink = NULL;
2041 	struct dc_sink *prev_sink = NULL;
2042 
2043 	link->type = dc_connection_none;
2044 	prev_sink = link->local_sink;
2045 
2046 	if (prev_sink)
2047 		dc_sink_release(prev_sink);
2048 
2049 	switch (link->connector_signal) {
2050 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2051 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2052 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2053 		break;
2054 	}
2055 
2056 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2057 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2058 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2059 		break;
2060 	}
2061 
2062 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2063 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2064 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2065 		break;
2066 	}
2067 
2068 	case SIGNAL_TYPE_LVDS: {
2069 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2070 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2071 		break;
2072 	}
2073 
2074 	case SIGNAL_TYPE_EDP: {
2075 		sink_caps.transaction_type =
2076 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2077 		sink_caps.signal = SIGNAL_TYPE_EDP;
2078 		break;
2079 	}
2080 
2081 	case SIGNAL_TYPE_DISPLAY_PORT: {
2082 		sink_caps.transaction_type =
2083 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2084 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2085 		break;
2086 	}
2087 
2088 	default:
2089 		DC_ERROR("Invalid connector type! signal:%d\n",
2090 			link->connector_signal);
2091 		return;
2092 	}
2093 
2094 	sink_init_data.link = link;
2095 	sink_init_data.sink_signal = sink_caps.signal;
2096 
2097 	sink = dc_sink_create(&sink_init_data);
2098 	if (!sink) {
2099 		DC_ERROR("Failed to create sink!\n");
2100 		return;
2101 	}
2102 
2103 	/* dc_sink_create returns a new reference */
2104 	link->local_sink = sink;
2105 
2106 	edid_status = dm_helpers_read_local_edid(
2107 			link->ctx,
2108 			link,
2109 			sink);
2110 
2111 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2114 }
2115 
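/*
 * Replay the cached DC state after GPU reset by issuing a full-update commit
 * for every stream and all of its planes.
 */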
2116 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2117 				     struct amdgpu_display_manager *dm)
2118 {
2119 	struct {
2120 		struct dc_surface_update surface_updates[MAX_SURFACES];
2121 		struct dc_plane_info plane_infos[MAX_SURFACES];
2122 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2123 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2124 		struct dc_stream_update stream_update;
	} *bundle;
2126 	int k, m;
2127 
2128 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2129 
2130 	if (!bundle) {
2131 		dm_error("Failed to allocate update bundle\n");
2132 		goto cleanup;
2133 	}
2134 
2135 	for (k = 0; k < dc_state->stream_count; k++) {
2136 		bundle->stream_update.stream = dc_state->streams[k];
2137 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2148 	}
2149 
2150 cleanup:
2151 	kfree(bundle);
2154 }
2155 
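/* Turn off the stream currently driven by @link via a dpms_off stream update. */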
2156 static void dm_set_dpms_off(struct dc_link *link)
2157 {
2158 	struct dc_stream_state *stream_state;
2159 	struct amdgpu_dm_connector *aconnector = link->priv;
2160 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2161 	struct dc_stream_update stream_update;
2162 	bool dpms_off = true;
2163 
2164 	memset(&stream_update, 0, sizeof(stream_update));
2165 	stream_update.dpms_off = &dpms_off;
2166 
2167 	mutex_lock(&adev->dm.dc_lock);
2168 	stream_state = dc_stream_find_from_link(link);
2169 
2170 	if (stream_state == NULL) {
2171 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2172 		mutex_unlock(&adev->dm.dc_lock);
2173 		return;
2174 	}
2175 
2176 	stream_update.stream = stream_state;
2177 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2178 				     stream_state, &stream_update,
2179 				     stream_state->ctx->dc->current_state);
2180 	mutex_unlock(&adev->dm.dc_lock);
2181 }
2182 
2183 static int dm_resume(void *handle)
2184 {
2185 	struct amdgpu_device *adev = handle;
2186 	struct drm_device *ddev = adev_to_drm(adev);
2187 	struct amdgpu_display_manager *dm = &adev->dm;
2188 	struct amdgpu_dm_connector *aconnector;
2189 	struct drm_connector *connector;
2190 	struct drm_connector_list_iter iter;
2191 	struct drm_crtc *crtc;
2192 	struct drm_crtc_state *new_crtc_state;
2193 	struct dm_crtc_state *dm_new_crtc_state;
2194 	struct drm_plane *plane;
2195 	struct drm_plane_state *new_plane_state;
2196 	struct dm_plane_state *dm_new_plane_state;
2197 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2198 	enum dc_connection_type new_connection_type = dc_connection_none;
2199 	struct dc_state *dc_state;
2200 	int i, r, j;
2201 
2202 	if (amdgpu_in_reset(adev)) {
2203 		dc_state = dm->cached_dc_state;
2204 
2205 		r = dm_dmub_hw_init(adev);
2206 		if (r)
2207 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2208 
2209 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2210 		dc_resume(dm->dc);
2211 
2212 		amdgpu_dm_irq_resume_early(adev);
2213 
2214 		for (i = 0; i < dc_state->stream_count; i++) {
2215 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
2219 			}
2220 		}
2221 #if defined(CONFIG_DRM_AMD_DC_DCN)
2222 		/*
2223 		 * Resource allocation happens for link encoders for newer ASIC in
2224 		 * dc_validate_global_state, so we need to revalidate it.
2225 		 *
2226 		 * This shouldn't fail (it passed once before), so warn if it does.
2227 		 */
2228 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2229 #endif
2230 
2231 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2232 
2233 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2234 
2235 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2236 
2237 		dc_release_state(dm->cached_dc_state);
2238 		dm->cached_dc_state = NULL;
2239 
2240 		amdgpu_dm_irq_resume_late(adev);
2241 
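		/*
		 * dm->dc_lock was taken in dm_suspend() on the GPU reset
		 * path; release it now that the cached state is committed.
		 */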
2242 		mutex_unlock(&dm->dc_lock);
2243 
2244 		return 0;
2245 	}
2246 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2247 	dc_release_state(dm_state->context);
2248 	dm_state->context = dc_create_state(dm->dc);
2249 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2250 	dc_resource_state_construct(dm->dc, dm_state->context);
2251 
2252 	/* Before powering on DC we need to re-initialize DMUB. */
2253 	r = dm_dmub_hw_init(adev);
2254 	if (r)
2255 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2256 
2257 	/* power on hardware */
2258 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2259 
2260 	/* program HPD filter */
2261 	dc_resume(dm->dc);
2262 
2263 	/*
2264 	 * early enable HPD Rx IRQ, should be done before set mode as short
2265 	 * pulse interrupts are used for MST
2266 	 */
2267 	amdgpu_dm_irq_resume_early(adev);
2268 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2270 	s3_handle_mst(ddev, false);
2271 
	/* Do detection */
2273 	drm_connector_list_iter_begin(ddev, &iter);
2274 	drm_for_each_connector_iter(connector, &iter) {
2275 		aconnector = to_amdgpu_dm_connector(connector);
2276 
		/*
		 * Skip connectors that were already created for MST ports;
		 * they are managed by the MST framework.
		 */
2281 		if (aconnector->mst_port)
2282 			continue;
2283 
2284 		mutex_lock(&aconnector->hpd_lock);
2285 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2286 			DRM_ERROR("KMS: Failed to detect connector\n");
2287 
2288 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2289 			emulated_link_detect(aconnector->dc_link);
2290 		else
2291 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2292 
2293 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2294 			aconnector->fake_enable = false;
2295 
2296 		if (aconnector->dc_sink)
2297 			dc_sink_release(aconnector->dc_sink);
2298 		aconnector->dc_sink = NULL;
2299 		amdgpu_dm_update_connector_after_detect(aconnector);
2300 		mutex_unlock(&aconnector->hpd_lock);
2301 	}
2302 	drm_connector_list_iter_end(&iter);
2303 
2304 	/* Force mode set in atomic commit */
2305 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2306 		new_crtc_state->active_changed = true;
2307 
2308 	/*
2309 	 * atomic_check is expected to create the dc states. We need to release
2310 	 * them here, since they were duplicated as part of the suspend
2311 	 * procedure.
2312 	 */
2313 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2314 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2315 		if (dm_new_crtc_state->stream) {
2316 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2317 			dc_stream_release(dm_new_crtc_state->stream);
2318 			dm_new_crtc_state->stream = NULL;
2319 		}
2320 	}
2321 
2322 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2323 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2324 		if (dm_new_plane_state->dc_state) {
2325 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2326 			dc_plane_state_release(dm_new_plane_state->dc_state);
2327 			dm_new_plane_state->dc_state = NULL;
2328 		}
2329 	}
2330 
2331 	drm_atomic_helper_resume(ddev, dm->cached_state);
2332 
2333 	dm->cached_state = NULL;
2334 
2335 	amdgpu_dm_irq_resume_late(adev);
2336 
2337 	amdgpu_dm_smu_write_watermarks_table(adev);
2338 
2339 	return 0;
2340 }
2341 
2342 /**
2343  * DOC: DM Lifecycle
2344  *
2345  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2346  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2347  * the base driver's device list to be initialized and torn down accordingly.
2348  *
2349  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2350  */
2351 
2352 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2353 	.name = "dm",
2354 	.early_init = dm_early_init,
2355 	.late_init = dm_late_init,
2356 	.sw_init = dm_sw_init,
2357 	.sw_fini = dm_sw_fini,
2358 	.early_fini = amdgpu_dm_early_fini,
2359 	.hw_init = dm_hw_init,
2360 	.hw_fini = dm_hw_fini,
2361 	.suspend = dm_suspend,
2362 	.resume = dm_resume,
2363 	.is_idle = dm_is_idle,
2364 	.wait_for_idle = dm_wait_for_idle,
2365 	.check_soft_reset = dm_check_soft_reset,
2366 	.soft_reset = dm_soft_reset,
2367 	.set_clockgating_state = dm_set_clockgating_state,
2368 	.set_powergating_state = dm_set_powergating_state,
2369 };
2370 
2371 const struct amdgpu_ip_block_version dm_ip_block =
2372 {
2373 	.type = AMD_IP_BLOCK_TYPE_DCE,
2374 	.major = 1,
2375 	.minor = 0,
2376 	.rev = 0,
2377 	.funcs = &amdgpu_dm_funcs,
2378 };
2379 
2380 
2381 /**
2382  * DOC: atomic
2383  *
2384  * *WIP*
2385  */
2386 
2387 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2388 	.fb_create = amdgpu_display_user_framebuffer_create,
2389 	.get_format_info = amd_get_format_info,
2390 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2391 	.atomic_check = amdgpu_dm_atomic_check,
2392 	.atomic_commit = drm_atomic_helper_commit,
2393 };
2394 
2395 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2396 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2397 };
2398 
2399 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2400 {
2401 	u32 max_cll, min_cll, max, min, q, r;
2402 	struct amdgpu_dm_backlight_caps *caps;
2403 	struct amdgpu_display_manager *dm;
2404 	struct drm_connector *conn_base;
2405 	struct amdgpu_device *adev;
2406 	struct dc_link *link = NULL;
2407 	static const u8 pre_computed_values[] = {
2408 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2409 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2410 	int i;
2411 
2412 	if (!aconnector || !aconnector->dc_link)
2413 		return;
2414 
2415 	link = aconnector->dc_link;
2416 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2417 		return;
2418 
2419 	conn_base = &aconnector->base;
2420 	adev = drm_to_adev(conn_base->dev);
2421 	dm = &adev->dm;
2422 	for (i = 0; i < dm->num_of_edps; i++) {
2423 		if (link == dm->backlight_link[i])
2424 			break;
2425 	}
2426 	if (i >= dm->num_of_edps)
2427 		return;
2428 	caps = &dm->backlight_caps[i];
2429 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2430 	caps->aux_support = false;
2431 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2432 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2433 
2434 	if (caps->ext_caps->bits.oled == 1 /*||
2435 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2436 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2437 		caps->aux_support = true;
2438 
2439 	if (amdgpu_backlight == 0)
2440 		caps->aux_support = false;
2441 	else if (amdgpu_backlight == 1)
2442 		caps->aux_support = true;
2443 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would require floating point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Substituting CV in
	 * the Luminance expression gives (2**q) * (50*2**(r/32)), so we only
	 * need to pre-compute the values of 50*2**(r/32). For pre-computing
	 * them we used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
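	/* Example: max_cll = 65 gives q = 2, r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * close to the exact 50*2**(65/32) ~= 204.4.
	 */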
2459 	q = max_cll >> 5;
2460 	r = max_cll % 32;
2461 	max = (1 << q) * pre_computed_values[r];
2462 
2463 	// min luminance: maxLum * (CV/255)^2 / 100
2464 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2465 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2466 
2467 	caps->aux_max_input_signal = max;
2468 	caps->aux_min_input_signal = min;
2469 }
2470 
2471 void amdgpu_dm_update_connector_after_detect(
2472 		struct amdgpu_dm_connector *aconnector)
2473 {
2474 	struct drm_connector *connector = &aconnector->base;
2475 	struct drm_device *dev = connector->dev;
2476 	struct dc_sink *sink;
2477 
2478 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2480 		return;
2481 
2482 	sink = aconnector->dc_link->local_sink;
2483 	if (sink)
2484 		dc_sink_retain(sink);
2485 
	/*
	 * An EDID-managed connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink depending on the link status.
	 * Skip if already done during boot.
	 */
2491 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2492 			&& aconnector->dc_em_sink) {
2493 
		/*
		 * For headless S3 resume, use the emulated sink to fake a
		 * stream because connector->sink is set to NULL on resume.
		 */
2498 		mutex_lock(&dev->mode_config.mutex);
2499 
2500 		if (sink) {
2501 			if (aconnector->dc_sink) {
2502 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink refcount because the link no longer
				 * points to it after disconnect; otherwise
				 * the next CRTC-to-connector reshuffle by
				 * UMD would trigger an unwanted dc_sink
				 * release.
				 */
2509 				dc_sink_release(aconnector->dc_sink);
2510 			}
2511 			aconnector->dc_sink = sink;
2512 			dc_sink_retain(aconnector->dc_sink);
2513 			amdgpu_dm_update_freesync_caps(connector,
2514 					aconnector->edid);
2515 		} else {
2516 			amdgpu_dm_update_freesync_caps(connector, NULL);
2517 			if (!aconnector->dc_sink) {
2518 				aconnector->dc_sink = aconnector->dc_em_sink;
2519 				dc_sink_retain(aconnector->dc_sink);
2520 			}
2521 		}
2522 
2523 		mutex_unlock(&dev->mode_config.mutex);
2524 
2525 		if (sink)
2526 			dc_sink_release(sink);
2527 		return;
2528 	}
2529 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2534 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2535 		dc_sink_release(sink);
2536 		return;
2537 	}
2538 
2539 	if (aconnector->dc_sink == sink) {
2540 		/*
2541 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2542 		 * Do nothing!!
2543 		 */
2544 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2545 				aconnector->connector_id);
2546 		if (sink)
2547 			dc_sink_release(sink);
2548 		return;
2549 	}
2550 
2551 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2552 		aconnector->connector_id, aconnector->dc_sink, sink);
2553 
2554 	mutex_lock(&dev->mode_config.mutex);
2555 
2556 	/*
2557 	 * 1. Update status of the drm connector
2558 	 * 2. Send an event and let userspace tell us what to do
2559 	 */
2560 	if (sink) {
2561 		/*
2562 		 * TODO: check if we still need the S3 mode update workaround.
2563 		 * If yes, put it here.
2564 		 */
2565 		if (aconnector->dc_sink) {
2566 			amdgpu_dm_update_freesync_caps(connector, NULL);
2567 			dc_sink_release(aconnector->dc_sink);
2568 		}
2569 
2570 		aconnector->dc_sink = sink;
2571 		dc_sink_retain(aconnector->dc_sink);
2572 		if (sink->dc_edid.length == 0) {
2573 			aconnector->edid = NULL;
2574 			if (aconnector->dc_link->aux_mode) {
2575 				drm_dp_cec_unset_edid(
2576 					&aconnector->dm_dp_aux.aux);
2577 			}
2578 		} else {
2579 			aconnector->edid =
2580 				(struct edid *)sink->dc_edid.raw_edid;
2581 
2582 			drm_connector_update_edid_property(connector,
2583 							   aconnector->edid);
2584 			if (aconnector->dc_link->aux_mode)
2585 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2586 						    aconnector->edid);
2587 		}
2588 
2589 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2590 		update_connector_ext_caps(aconnector);
2591 	} else {
2592 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2593 		amdgpu_dm_update_freesync_caps(connector, NULL);
2594 		drm_connector_update_edid_property(connector, NULL);
2595 		aconnector->num_modes = 0;
2596 		dc_sink_release(aconnector->dc_sink);
2597 		aconnector->dc_sink = NULL;
2598 		aconnector->edid = NULL;
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2601 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2602 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2603 #endif
2604 	}
2605 
2606 	mutex_unlock(&dev->mode_config.mutex);
2607 
2608 	update_subconnector_property(aconnector);
2609 
2610 	if (sink)
2611 		dc_sink_release(sink);
2612 }
2613 
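/*
 * Handler for HPD long pulses: re-run sink detection on the link and, unless
 * the connector state is forced, send a hotplug event to userspace.
 */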
2614 static void handle_hpd_irq(void *param)
2615 {
2616 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2617 	struct drm_connector *connector = &aconnector->base;
2618 	struct drm_device *dev = connector->dev;
2619 	enum dc_connection_type new_connection_type = dc_connection_none;
2620 	struct amdgpu_device *adev = drm_to_adev(dev);
2621 #ifdef CONFIG_DRM_AMD_DC_HDCP
2622 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2623 #endif
2624 
2625 	if (adev->dm.disable_hpd_irq)
2626 		return;
2627 
	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (for the MST case) MST handles this
	 * in its own context.
	 */
2632 	mutex_lock(&aconnector->hpd_lock);
2633 
2634 #ifdef CONFIG_DRM_AMD_DC_HDCP
2635 	if (adev->dm.hdcp_workqueue) {
2636 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2637 		dm_con_state->update_hdcp = true;
2638 	}
2639 #endif
2640 	if (aconnector->fake_enable)
2641 		aconnector->fake_enable = false;
2642 
2643 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2644 		DRM_ERROR("KMS: Failed to detect connector\n");
2645 
2646 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2647 		emulated_link_detect(aconnector->dc_link);
2648 
2650 		drm_modeset_lock_all(dev);
2651 		dm_restore_drm_connector_state(dev, connector);
2652 		drm_modeset_unlock_all(dev);
2653 
2654 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2655 			drm_kms_helper_hotplug_event(dev);
2656 
2657 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2658 		if (new_connection_type == dc_connection_none &&
2659 		    aconnector->dc_link->type == dc_connection_none)
2660 			dm_set_dpms_off(aconnector->dc_link);
2661 
2662 		amdgpu_dm_update_connector_after_detect(aconnector);
2663 
2664 		drm_modeset_lock_all(dev);
2665 		dm_restore_drm_connector_state(dev, connector);
2666 		drm_modeset_unlock_all(dev);
2667 
2668 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2669 			drm_kms_helper_hotplug_event(dev);
2670 	}
2671 	mutex_unlock(&aconnector->hpd_lock);
2673 }
2674 
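/*
 * Process the MST sideband messages signalled through a DP short pulse: read
 * the ESI bytes, hand them to the MST manager, ACK them at DPCD, and keep
 * looping while new IRQs arrive (bounded by max_process_count).
 */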
2675 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2676 {
2677 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2678 	uint8_t dret;
2679 	bool new_irq_handled = false;
2680 	int dpcd_addr;
2681 	int dpcd_bytes_to_read;
2682 
2683 	const int max_process_count = 30;
2684 	int process_count = 0;
2685 
2686 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2687 
2688 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2689 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2690 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2691 		dpcd_addr = DP_SINK_COUNT;
2692 	} else {
2693 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2694 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2695 		dpcd_addr = DP_SINK_COUNT_ESI;
2696 	}
2697 
2698 	dret = drm_dp_dpcd_read(
2699 		&aconnector->dm_dp_aux.aux,
2700 		dpcd_addr,
2701 		esi,
2702 		dpcd_bytes_to_read);
2703 
2704 	while (dret == dpcd_bytes_to_read &&
2705 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2708 
2709 		process_count++;
2710 
2711 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2712 		/* handle HPD short pulse irq */
2713 		if (aconnector->mst_mgr.mst_state)
2714 			drm_dp_mst_hpd_irq(
2715 				&aconnector->mst_mgr,
2716 				esi,
2717 				&new_irq_handled);
2718 
2719 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2721 			const int ack_dpcd_bytes_to_write =
2722 				dpcd_bytes_to_read - 1;
2723 
2724 			for (retry = 0; retry < 3; retry++) {
2725 				uint8_t wret;
2726 
2727 				wret = drm_dp_dpcd_write(
2728 					&aconnector->dm_dp_aux.aux,
2729 					dpcd_addr + 1,
2730 					&esi[1],
2731 					ack_dpcd_bytes_to_write);
2732 				if (wret == ack_dpcd_bytes_to_write)
2733 					break;
2734 			}
2735 
2736 			/* check if there is new irq to be handled */
2737 			dret = drm_dp_dpcd_read(
2738 				&aconnector->dm_dp_aux.aux,
2739 				dpcd_addr,
2740 				esi,
2741 				dpcd_bytes_to_read);
2742 
2743 			new_irq_handled = false;
2744 		} else {
2745 			break;
2746 		}
2747 	}
2748 
2749 	if (process_count == max_process_count)
2750 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2751 }
2752 
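/*
 * Handler for DP short HPD pulses: dispatch MST up/down requests, let DC
 * handle link loss and automated test requests, and re-detect the sink when
 * the downstream port status changed.
 */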
2753 static void handle_hpd_rx_irq(void *param)
2754 {
2755 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2756 	struct drm_connector *connector = &aconnector->base;
2757 	struct drm_device *dev = connector->dev;
2758 	struct dc_link *dc_link = aconnector->dc_link;
2759 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2760 	bool result = false;
2761 	enum dc_connection_type new_connection_type = dc_connection_none;
2762 	struct amdgpu_device *adev = drm_to_adev(dev);
2763 	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;
2765 
2766 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2767 
2768 	if (adev->dm.disable_hpd_irq)
2769 		return;
2770 
	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt does not run
	 * into a GPIO conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
	 */
2777 	mutex_lock(&aconnector->hpd_lock);
2778 
2779 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2780 
2781 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2782 		(dc_link->type == dc_connection_mst_branch)) {
2783 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2784 			result = true;
2785 			dm_handle_hpd_rx_irq(aconnector);
2786 			goto out;
2787 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2788 			result = false;
2789 			dm_handle_hpd_rx_irq(aconnector);
2790 			goto out;
2791 		}
2792 	}
2793 
2794 	/*
2795 	 * TODO: We need the lock to avoid touching DC state while it's being
2796 	 * modified during automated compliance testing, or when link loss
2797 	 * happens. While this should be split into subhandlers and proper
2798 	 * interfaces to avoid having to conditionally lock like this in the
2799 	 * outer layer, we need this workaround temporarily to allow MST
2800 	 * lightup in some scenarios to avoid timeout.
2801 	 */
2802 	if (!amdgpu_in_reset(adev) &&
2803 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2804 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2805 		mutex_lock(&adev->dm.dc_lock);
		lock_flag = true;
2807 	}
2808 
2809 #ifdef CONFIG_DRM_AMD_DC_HDCP
2810 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2811 #else
2812 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2813 #endif
2814 	if (!amdgpu_in_reset(adev) && lock_flag)
2815 		mutex_unlock(&adev->dm.dc_lock);
2816 
2817 out:
2818 	if (result && !is_mst_root_connector) {
2819 		/* Downstream Port status changed. */
2820 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2821 			DRM_ERROR("KMS: Failed to detect connector\n");
2822 
2823 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2824 			emulated_link_detect(dc_link);
2825 
2826 			if (aconnector->fake_enable)
2827 				aconnector->fake_enable = false;
2828 
2829 			amdgpu_dm_update_connector_after_detect(aconnector);
2830 
2832 			drm_modeset_lock_all(dev);
2833 			dm_restore_drm_connector_state(dev, connector);
2834 			drm_modeset_unlock_all(dev);
2835 
2836 			drm_kms_helper_hotplug_event(dev);
2837 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2838 
2839 			if (aconnector->fake_enable)
2840 				aconnector->fake_enable = false;
2841 
2842 			amdgpu_dm_update_connector_after_detect(aconnector);
2843 
2845 			drm_modeset_lock_all(dev);
2846 			dm_restore_drm_connector_state(dev, connector);
2847 			drm_modeset_unlock_all(dev);
2848 
2849 			drm_kms_helper_hotplug_event(dev);
2850 		}
2851 	}
2852 #ifdef CONFIG_DRM_AMD_DC_HDCP
2853 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2854 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2856 	}
2857 #endif
2858 
2859 	if (dc_link->type != dc_connection_mst_branch)
2860 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2861 
2862 	mutex_unlock(&aconnector->hpd_lock);
2863 }
2864 
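/*
 * Hook up handle_hpd_irq() and handle_hpd_rx_irq() to the HPD and HPD RX
 * interrupt sources of every registered connector.
 */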
2865 static void register_hpd_handlers(struct amdgpu_device *adev)
2866 {
2867 	struct drm_device *dev = adev_to_drm(adev);
2868 	struct drm_connector *connector;
2869 	struct amdgpu_dm_connector *aconnector;
2870 	const struct dc_link *dc_link;
2871 	struct dc_interrupt_params int_params = {0};
2872 
2873 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2874 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2875 
2876 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2878 
2879 		aconnector = to_amdgpu_dm_connector(connector);
2880 		dc_link = aconnector->dc_link;
2881 
2882 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2883 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2884 			int_params.irq_source = dc_link->irq_source_hpd;
2885 
2886 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2887 					handle_hpd_irq,
2888 					(void *) aconnector);
2889 		}
2890 
2891 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2892 
2893 			/* Also register for DP short pulse (hpd_rx). */
2894 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2895 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2896 
2897 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2898 					handle_hpd_rx_irq,
2899 					(void *) aconnector);
2900 		}
2901 	}
2902 }
2903 
2904 #if defined(CONFIG_DRM_AMD_DC_SI)
2905 /* Register IRQ sources and initialize IRQ callbacks */
2906 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2907 {
2908 	struct dc *dc = adev->dm.dc;
2909 	struct common_irq_params *c_irq_params;
2910 	struct dc_interrupt_params int_params = {0};
2911 	int r;
2912 	int i;
2913 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2914 
2915 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2916 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2917 
2918 	/*
2919 	 * Actions of amdgpu_irq_add_id():
2920 	 * 1. Register a set() function with base driver.
2921 	 *    Base driver will call set() function to enable/disable an
2922 	 *    interrupt in DC hardware.
2923 	 * 2. Register amdgpu_dm_irq_handler().
2924 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2925 	 *    coming from DC hardware.
2926 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2928 
2929 	/* Use VBLANK interrupt */
2930 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2932 		if (r) {
2933 			DRM_ERROR("Failed to add crtc irq id!\n");
2934 			return r;
2935 		}
2936 
2937 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2940 
2941 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2942 
2943 		c_irq_params->adev = adev;
2944 		c_irq_params->irq_src = int_params.irq_source;
2945 
2946 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2947 				dm_crtc_high_irq, c_irq_params);
2948 	}
2949 
2950 	/* Use GRPH_PFLIP interrupt */
2951 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2952 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2953 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2954 		if (r) {
2955 			DRM_ERROR("Failed to add page flip irq id!\n");
2956 			return r;
2957 		}
2958 
2959 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2960 		int_params.irq_source =
2961 			dc_interrupt_to_irq_source(dc, i, 0);
2962 
2963 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2964 
2965 		c_irq_params->adev = adev;
2966 		c_irq_params->irq_src = int_params.irq_source;
2967 
2968 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2969 				dm_pflip_high_irq, c_irq_params);
2971 	}
2972 
2973 	/* HPD */
2974 	r = amdgpu_irq_add_id(adev, client_id,
2975 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2976 	if (r) {
2977 		DRM_ERROR("Failed to add hpd irq id!\n");
2978 		return r;
2979 	}
2980 
2981 	register_hpd_handlers(adev);
2982 
2983 	return 0;
2984 }
2985 #endif
2986 
2987 /* Register IRQ sources and initialize IRQ callbacks */
2988 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2989 {
2990 	struct dc *dc = adev->dm.dc;
2991 	struct common_irq_params *c_irq_params;
2992 	struct dc_interrupt_params int_params = {0};
2993 	int r;
2994 	int i;
2995 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2996 
2997 	if (adev->asic_type >= CHIP_VEGA10)
2998 		client_id = SOC15_IH_CLIENTID_DCE;
2999 
3000 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3001 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3002 
3003 	/*
3004 	 * Actions of amdgpu_irq_add_id():
3005 	 * 1. Register a set() function with base driver.
3006 	 *    Base driver will call set() function to enable/disable an
3007 	 *    interrupt in DC hardware.
3008 	 * 2. Register amdgpu_dm_irq_handler().
3009 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3010 	 *    coming from DC hardware.
3011 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3013 
3014 	/* Use VBLANK interrupt */
3015 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3016 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3017 		if (r) {
3018 			DRM_ERROR("Failed to add crtc irq id!\n");
3019 			return r;
3020 		}
3021 
3022 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3023 		int_params.irq_source =
3024 			dc_interrupt_to_irq_source(dc, i, 0);
3025 
3026 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3027 
3028 		c_irq_params->adev = adev;
3029 		c_irq_params->irq_src = int_params.irq_source;
3030 
3031 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3032 				dm_crtc_high_irq, c_irq_params);
3033 	}
3034 
3035 	/* Use VUPDATE interrupt */
3036 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3037 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3038 		if (r) {
3039 			DRM_ERROR("Failed to add vupdate irq id!\n");
3040 			return r;
3041 		}
3042 
3043 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3044 		int_params.irq_source =
3045 			dc_interrupt_to_irq_source(dc, i, 0);
3046 
3047 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3048 
3049 		c_irq_params->adev = adev;
3050 		c_irq_params->irq_src = int_params.irq_source;
3051 
3052 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3053 				dm_vupdate_high_irq, c_irq_params);
3054 	}
3055 
3056 	/* Use GRPH_PFLIP interrupt */
3057 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3058 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3059 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3060 		if (r) {
3061 			DRM_ERROR("Failed to add page flip irq id!\n");
3062 			return r;
3063 		}
3064 
3065 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3066 		int_params.irq_source =
3067 			dc_interrupt_to_irq_source(dc, i, 0);
3068 
3069 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3070 
3071 		c_irq_params->adev = adev;
3072 		c_irq_params->irq_src = int_params.irq_source;
3073 
3074 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3075 				dm_pflip_high_irq, c_irq_params);
3077 	}
3078 
3079 	/* HPD */
3080 	r = amdgpu_irq_add_id(adev, client_id,
3081 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3082 	if (r) {
3083 		DRM_ERROR("Failed to add hpd irq id!\n");
3084 		return r;
3085 	}
3086 
3087 	register_hpd_handlers(adev);
3088 
3089 	return 0;
3090 }
3091 
3092 #if defined(CONFIG_DRM_AMD_DC_DCN)
3093 /* Register IRQ sources and initialize IRQ callbacks */
3094 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3095 {
3096 	struct dc *dc = adev->dm.dc;
3097 	struct common_irq_params *c_irq_params;
3098 	struct dc_interrupt_params int_params = {0};
3099 	int r;
3100 	int i;
3101 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3102 	static const unsigned int vrtl_int_srcid[] = {
3103 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3104 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3105 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3106 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3107 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3108 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3109 	};
3110 #endif
3111 
3112 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3113 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3114 
3115 	/*
3116 	 * Actions of amdgpu_irq_add_id():
3117 	 * 1. Register a set() function with base driver.
3118 	 *    Base driver will call set() function to enable/disable an
3119 	 *    interrupt in DC hardware.
3120 	 * 2. Register amdgpu_dm_irq_handler().
3121 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3122 	 *    coming from DC hardware.
3123 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3124 	 *    for acknowledging and handling.
3125 	 */
3126 
3127 	/* Use VSTARTUP interrupt */
3128 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3129 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3130 			i++) {
3131 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3132 
3133 		if (r) {
3134 			DRM_ERROR("Failed to add crtc irq id!\n");
3135 			return r;
3136 		}
3137 
3138 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3139 		int_params.irq_source =
3140 			dc_interrupt_to_irq_source(dc, i, 0);
3141 
3142 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3143 
3144 		c_irq_params->adev = adev;
3145 		c_irq_params->irq_src = int_params.irq_source;
3146 
3147 		amdgpu_dm_irq_register_interrupt(
3148 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3149 	}
3150 
3151 	/* Use otg vertical line interrupt */
3152 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3154 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3155 				vrtl_int_srcid[i], &adev->vline0_irq);
3156 
3157 		if (r) {
3158 			DRM_ERROR("Failed to add vline0 irq id!\n");
3159 			return r;
3160 		}
3161 
3162 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3163 		int_params.irq_source =
3164 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3165 
3166 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3167 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3168 			break;
3169 		}
3170 
3171 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3172 					- DC_IRQ_SOURCE_DC1_VLINE0];
3173 
3174 		c_irq_params->adev = adev;
3175 		c_irq_params->irq_src = int_params.irq_source;
3176 
3177 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3178 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3179 	}
3180 #endif
3181 
3182 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3183 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3184 	 * to trigger at end of each vblank, regardless of state of the lock,
3185 	 * matching DCE behaviour.
3186 	 */
3187 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3188 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3189 	     i++) {
3190 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3191 
3192 		if (r) {
3193 			DRM_ERROR("Failed to add vupdate irq id!\n");
3194 			return r;
3195 		}
3196 
3197 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3198 		int_params.irq_source =
3199 			dc_interrupt_to_irq_source(dc, i, 0);
3200 
3201 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3202 
3203 		c_irq_params->adev = adev;
3204 		c_irq_params->irq_src = int_params.irq_source;
3205 
3206 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3207 				dm_vupdate_high_irq, c_irq_params);
3208 	}
3209 
3210 	/* Use GRPH_PFLIP interrupt */
3211 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3212 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3213 			i++) {
3214 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3215 		if (r) {
3216 			DRM_ERROR("Failed to add page flip irq id!\n");
3217 			return r;
3218 		}
3219 
3220 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3221 		int_params.irq_source =
3222 			dc_interrupt_to_irq_source(dc, i, 0);
3223 
3224 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3225 
3226 		c_irq_params->adev = adev;
3227 		c_irq_params->irq_src = int_params.irq_source;
3228 
3229 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3230 				dm_pflip_high_irq, c_irq_params);
3232 	}
3233 
3234 	/* HPD */
3235 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3236 			&adev->hpd_irq);
3237 	if (r) {
3238 		DRM_ERROR("Failed to add hpd irq id!\n");
3239 		return r;
3240 	}
3241 
3242 	register_hpd_handlers(adev);
3243 
3244 	return 0;
3245 }

/* Register Outbox IRQ sources and initialize IRQ callbacks */
3247 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3248 {
3249 	struct dc *dc = adev->dm.dc;
3250 	struct common_irq_params *c_irq_params;
3251 	struct dc_interrupt_params int_params = {0};
3252 	int r, i;
3253 
3254 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3255 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3256 
3257 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3258 			&adev->dmub_outbox_irq);
3259 	if (r) {
3260 		DRM_ERROR("Failed to add outbox irq id!\n");
3261 		return r;
3262 	}
3263 
3264 	if (dc->ctx->dmub_srv) {
3265 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3266 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3267 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);
3269 
3270 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3271 
3272 		c_irq_params->adev = adev;
3273 		c_irq_params->irq_src = int_params.irq_source;
3274 
3275 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3276 				dm_dmub_outbox1_low_irq, c_irq_params);
3277 	}
3278 
3279 	return 0;
3280 }
3281 #endif
3282 
3283 /*
3284  * Acquires the lock for the atomic state object and returns
3285  * the new atomic state.
3286  *
3287  * This should only be called during atomic check.
3288  */
3289 static int dm_atomic_get_state(struct drm_atomic_state *state,
3290 			       struct dm_atomic_state **dm_state)
3291 {
3292 	struct drm_device *dev = state->dev;
3293 	struct amdgpu_device *adev = drm_to_adev(dev);
3294 	struct amdgpu_display_manager *dm = &adev->dm;
3295 	struct drm_private_state *priv_state;
3296 
3297 	if (*dm_state)
3298 		return 0;
3299 
3300 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3301 	if (IS_ERR(priv_state))
3302 		return PTR_ERR(priv_state);
3303 
3304 	*dm_state = to_dm_atomic_state(priv_state);
3305 
3306 	return 0;
3307 }
3308 
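/*
 * Return the new DM private state from @state, or NULL if the DM private
 * object is not part of this commit.
 */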
3309 static struct dm_atomic_state *
3310 dm_atomic_get_new_state(struct drm_atomic_state *state)
3311 {
3312 	struct drm_device *dev = state->dev;
3313 	struct amdgpu_device *adev = drm_to_adev(dev);
3314 	struct amdgpu_display_manager *dm = &adev->dm;
3315 	struct drm_private_obj *obj;
3316 	struct drm_private_state *new_obj_state;
3317 	int i;
3318 
3319 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3320 		if (obj->funcs == dm->atomic_obj.funcs)
3321 			return to_dm_atomic_state(new_obj_state);
3322 	}
3323 
3324 	return NULL;
3325 }
3326 
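/*
 * Duplicate the DM private state, deep-copying the DC context so the new
 * state can be validated and committed independently of the current one.
 */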
3327 static struct drm_private_state *
3328 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3329 {
3330 	struct dm_atomic_state *old_state, *new_state;
3331 
3332 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3333 	if (!new_state)
3334 		return NULL;
3335 
3336 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3337 
3338 	old_state = to_dm_atomic_state(obj->state);
3339 
3340 	if (old_state && old_state->context)
3341 		new_state->context = dc_copy_state(old_state->context);
3342 
3343 	if (!new_state->context) {
3344 		kfree(new_state);
3345 		return NULL;
3346 	}
3347 
3348 	return &new_state->base;
3349 }
3350 
3351 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3352 				    struct drm_private_state *state)
3353 {
3354 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3355 
3356 	if (dm_state && dm_state->context)
3357 		dc_release_state(dm_state->context);
3358 
3359 	kfree(dm_state);
3360 }
3361 
3362 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3363 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3364 	.atomic_destroy_state = dm_atomic_destroy_state,
3365 };
3366 
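/*
 * Initialize the DRM mode config limits, create the DM private atomic object
 * with a copy of the current DC state, then register the display properties
 * and the audio component.
 */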
3367 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3368 {
3369 	struct dm_atomic_state *state;
3370 	int r;
3371 
3372 	adev->mode_info.mode_config_initialized = true;
3373 
3374 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3375 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3376 
3377 	adev_to_drm(adev)->mode_config.max_width = 16384;
3378 	adev_to_drm(adev)->mode_config.max_height = 16384;
3379 
3380 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3381 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3382 	/* indicates support for immediate flip */
3383 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3384 
3385 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3386 
3387 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3388 	if (!state)
3389 		return -ENOMEM;
3390 
3391 	state->context = dc_create_state(adev->dm.dc);
3392 	if (!state->context) {
3393 		kfree(state);
3394 		return -ENOMEM;
3395 	}
3396 
3397 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3398 
3399 	drm_atomic_private_obj_init(adev_to_drm(adev),
3400 				    &adev->dm.atomic_obj,
3401 				    &state->base,
3402 				    &dm_atomic_state_funcs);
3403 
	r = amdgpu_display_modeset_create_props(adev);
	if (r) {
		drm_atomic_private_obj_fini(&adev->dm.atomic_obj);
		return r;
	}

	r = amdgpu_dm_audio_init(adev);
	if (r) {
		drm_atomic_private_obj_fini(&adev->dm.atomic_obj);
		return r;
	}
3417 
3418 	return 0;
3419 }
3420 
3421 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3422 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3423 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3424 
3425 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3426 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3427 
3428 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3429 					    int bl_idx)
3430 {
3431 #if defined(CONFIG_ACPI)
3432 	struct amdgpu_dm_backlight_caps caps;
3433 
3434 	memset(&caps, 0, sizeof(caps));
3435 
3436 	if (dm->backlight_caps[bl_idx].caps_valid)
3437 		return;
3438 
3439 	amdgpu_acpi_get_backlight_caps(&caps);
3440 	if (caps.caps_valid) {
3441 		dm->backlight_caps[bl_idx].caps_valid = true;
3442 		if (caps.aux_support)
3443 			return;
3444 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3445 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3446 	} else {
3447 		dm->backlight_caps[bl_idx].min_input_signal =
3448 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3449 		dm->backlight_caps[bl_idx].max_input_signal =
3450 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3451 	}
3452 #else
3453 	if (dm->backlight_caps[bl_idx].aux_support)
3454 		return;
3455 
3456 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3457 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3458 #endif
3459 }
3460 
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned int *min, unsigned int *max)
3463 {
3464 	if (!caps)
3465 		return 0;
3466 
3467 	if (caps->aux_support) {
3468 		// Firmware limits are in nits, DC API wants millinits.
3469 		*max = 1000 * caps->aux_max_input_signal;
3470 		*min = 1000 * caps->aux_min_input_signal;
3471 	} else {
3472 		// Firmware limits are 8-bit, PWM control is 16-bit.
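		// 0xFF * 0x101 == 0xFFFF, so this maps the 8-bit range onto the full 16-bit range.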
3473 		*max = 0x101 * caps->max_input_signal;
3474 		*min = 0x101 * caps->min_input_signal;
3475 	}
3476 	return 1;
3477 }
3478 
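/*
 * Map a user brightness value in [0, AMDGPU_MAX_BL_LEVEL] linearly onto
 * the firmware range [min, max]; user 0 yields min and user 255 yields
 * exactly max.
 */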
3479 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3480 					uint32_t brightness)
3481 {
	unsigned int min, max;
3483 
3484 	if (!get_brightness_range(caps, &min, &max))
3485 		return brightness;
3486 
3487 	// Rescale 0..255 to min..max
3488 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3489 				       AMDGPU_MAX_BL_LEVEL);
3490 }
3491 
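/*
 * Inverse of convert_brightness_from_user(): map a firmware brightness in
 * [min, max] back onto the user-visible [0, AMDGPU_MAX_BL_LEVEL] range.
 */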
3492 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3493 				      uint32_t brightness)
3494 {
	unsigned int min, max;
3496 
3497 	if (!get_brightness_range(caps, &min, &max))
3498 		return brightness;
3499 
3500 	if (brightness < min)
3501 		return 0;
3502 	// Rescale min..max to 0..255
3503 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3504 				 max - min);
3505 }
3506 
3507 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3508 					 int bl_idx,
3509 					 u32 user_brightness)
3510 {
3511 	struct amdgpu_dm_backlight_caps caps;
3512 	struct dc_link *link;
3513 	u32 brightness;
3514 	bool rc;
3515 
3516 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3517 	caps = dm->backlight_caps[bl_idx];
3518 
3519 	dm->brightness[bl_idx] = user_brightness;
3520 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3521 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3522 
3523 	/* Change brightness based on AUX property */
3524 	if (caps.aux_support) {
3525 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3526 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3527 		if (!rc)
3528 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3529 	} else {
3530 		rc = dc_link_set_backlight_level(link, brightness, 0);
3531 		if (!rc)
3532 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3533 	}
3534 
3535 	return rc ? 0 : 1;
3536 }
3537 
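/*
 * A single set of backlight_ops serves every registered eDP panel, so map
 * the backlight device back to its index before programming the DC link.
 */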
3538 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3539 {
3540 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3541 	int i;
3542 
3543 	for (i = 0; i < dm->num_of_edps; i++) {
3544 		if (bd == dm->backlight_dev[i])
3545 			break;
3546 	}
	if (i >= dm->num_of_edps)
		i = 0;
3549 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3550 
3551 	return 0;
3552 }
3553 
3554 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3555 					 int bl_idx)
3556 {
3557 	struct amdgpu_dm_backlight_caps caps;
3558 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3559 
3560 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3561 	caps = dm->backlight_caps[bl_idx];
3562 
3563 	if (caps.aux_support) {
3564 		u32 avg, peak;
3565 		bool rc;
3566 
3567 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3568 		if (!rc)
3569 			return dm->brightness[bl_idx];
3570 		return convert_brightness_to_user(&caps, avg);
3571 	} else {
3572 		int ret = dc_link_get_backlight_level(link);
3573 
3574 		if (ret == DC_ERROR_UNEXPECTED)
3575 			return dm->brightness[bl_idx];
3576 		return convert_brightness_to_user(&caps, ret);
3577 	}
3578 }
3579 
3580 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3581 {
3582 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3583 	int i;
3584 
3585 	for (i = 0; i < dm->num_of_edps; i++) {
3586 		if (bd == dm->backlight_dev[i])
3587 			break;
3588 	}
	if (i >= dm->num_of_edps)
		i = 0;
3591 	return amdgpu_dm_backlight_get_level(dm, i);
3592 }
3593 
3594 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3595 	.options = BL_CORE_SUSPENDRESUME,
3596 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3597 	.update_status	= amdgpu_dm_backlight_update_status,
3598 };
3599 
3600 static void
3601 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3602 {
3603 	char bl_name[16];
3604 	struct backlight_properties props = { 0 };
3605 
3606 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3607 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3608 
3609 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3610 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3611 	props.type = BACKLIGHT_RAW;
3612 
3613 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3614 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3615 
3616 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3617 								       adev_to_drm(dm->adev)->dev,
3618 								       dm,
3619 								       &amdgpu_dm_backlight_ops,
3620 								       &props);
3621 
3622 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3623 		DRM_ERROR("DM: Backlight registration failed!\n");
3624 	else
3625 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3626 }
3627 #endif
3628 
3629 static int initialize_plane(struct amdgpu_display_manager *dm,
3630 			    struct amdgpu_mode_info *mode_info, int plane_id,
3631 			    enum drm_plane_type plane_type,
3632 			    const struct dc_plane_cap *plane_cap)
3633 {
3634 	struct drm_plane *plane;
3635 	unsigned long possible_crtcs;
3636 	int ret = 0;
3637 
3638 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3639 	if (!plane) {
3640 		DRM_ERROR("KMS: Failed to allocate plane\n");
3641 		return -ENOMEM;
3642 	}
3643 	plane->type = plane_type;
3644 
	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if the plane is not going to be used as a primary
	 * plane for a CRTC - i.e. for overlay or underlay planes.
	 */
3651 	possible_crtcs = 1 << plane_id;
3652 	if (plane_id >= dm->dc->caps.max_streams)
3653 		possible_crtcs = 0xff;
3654 
3655 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3656 
3657 	if (ret) {
3658 		DRM_ERROR("KMS: Failed to initialize plane\n");
3659 		kfree(plane);
3660 		return ret;
3661 	}
3662 
3663 	if (mode_info)
3664 		mode_info->planes[plane_id] = plane;
3665 
3666 	return ret;
3667 }
3668 
3670 static void register_backlight_device(struct amdgpu_display_manager *dm,
3671 				      struct dc_link *link)
3672 {
3673 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3674 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3675 
3676 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3677 	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization, because not having a backlight control
		 * is better than a black screen.
		 */
3683 		if (!dm->backlight_dev[dm->num_of_edps])
3684 			amdgpu_dm_register_backlight_device(dm);
3685 
3686 		if (dm->backlight_dev[dm->num_of_edps]) {
3687 			dm->backlight_link[dm->num_of_edps] = link;
3688 			dm->num_of_edps++;
3689 		}
3690 	}
3691 #endif
3692 }
3693 
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
3703 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3704 {
3705 	struct amdgpu_display_manager *dm = &adev->dm;
3706 	int32_t i;
3707 	struct amdgpu_dm_connector *aconnector = NULL;
3708 	struct amdgpu_encoder *aencoder = NULL;
3709 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3710 	uint32_t link_cnt;
3711 	int32_t primary_planes;
3712 	enum dc_connection_type new_connection_type = dc_connection_none;
3713 	const struct dc_plane_cap *plane;
3714 
3715 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
3717 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3718 
3719 	link_cnt = dm->dc->caps.max_links;
3720 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3721 		DRM_ERROR("DM: Failed to initialize mode config\n");
3722 		return -EINVAL;
3723 	}
3724 
3725 	/* There is one primary plane per CRTC */
3726 	primary_planes = dm->dc->caps.max_streams;
3727 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3728 
3729 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3731 	 * Order is reversed to match iteration order in atomic check.
3732 	 */
3733 	for (i = (primary_planes - 1); i >= 0; i--) {
3734 		plane = &dm->dc->caps.planes[i];
3735 
3736 		if (initialize_plane(dm, mode_info, i,
3737 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3738 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3739 			goto fail;
3740 		}
3741 	}
3742 
3743 	/*
3744 	 * Initialize overlay planes, index starting after primary planes.
3745 	 * These planes have a higher DRM index than the primary planes since
3746 	 * they should be considered as having a higher z-order.
3747 	 * Order is reversed to match iteration order in atomic check.
3748 	 *
3749 	 * Only support DCN for now, and only expose one so we don't encourage
3750 	 * userspace to use up all the pipes.
3751 	 */
3752 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3753 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3754 
3755 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3756 			continue;
3757 
3758 		if (!plane->blends_with_above || !plane->blends_with_below)
3759 			continue;
3760 
3761 		if (!plane->pixel_format_support.argb8888)
3762 			continue;
3763 
3764 		if (initialize_plane(dm, NULL, primary_planes + i,
3765 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3766 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3767 			goto fail;
3768 		}
3769 
3770 		/* Only create one overlay plane. */
3771 		break;
3772 	}
3773 
3774 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3775 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3776 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3777 			goto fail;
3778 		}
3779 
3780 #if defined(CONFIG_DRM_AMD_DC_DCN)
3781 	/* Use Outbox interrupt */
3782 	switch (adev->asic_type) {
3783 	case CHIP_SIENNA_CICHLID:
3784 	case CHIP_NAVY_FLOUNDER:
3785 	case CHIP_YELLOW_CARP:
3786 	case CHIP_RENOIR:
3787 		if (register_outbox_irq_handlers(dm->adev)) {
3788 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3789 			goto fail;
3790 		}
3791 		break;
3792 	default:
3793 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3794 	}
3795 #endif
3796 
	/* Loop over all connectors on the board. */
3798 	for (i = 0; i < link_cnt; i++) {
3799 		struct dc_link *link = NULL;
3800 
3801 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3802 			DRM_ERROR(
3803 				"KMS: Cannot support more than %d display indexes\n",
3804 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3805 			continue;
3806 		}
3807 
3808 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3809 		if (!aconnector)
3810 			goto fail;
3811 
3812 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3813 		if (!aencoder)
3814 			goto fail;
3815 
3816 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3817 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3818 			goto fail;
3819 		}
3820 
3821 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3822 			DRM_ERROR("KMS: Failed to initialize connector\n");
3823 			goto fail;
3824 		}
3825 
3826 		link = dc_get_link_at_index(dm->dc, i);
3827 
3828 		if (!dc_link_detect_sink(link, &new_connection_type))
3829 			DRM_ERROR("KMS: Failed to detect connector\n");
3830 
3831 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3832 			emulated_link_detect(link);
3833 			amdgpu_dm_update_connector_after_detect(aconnector);
3834 
3835 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3836 			amdgpu_dm_update_connector_after_detect(aconnector);
3837 			register_backlight_device(dm, link);
3838 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3839 				amdgpu_dm_set_psr_caps(link);
3840 		}
3843 	}
3844 
3845 	/* Software is initialized. Now we can register interrupt handlers. */
3846 	switch (adev->asic_type) {
3847 #if defined(CONFIG_DRM_AMD_DC_SI)
3848 	case CHIP_TAHITI:
3849 	case CHIP_PITCAIRN:
3850 	case CHIP_VERDE:
3851 	case CHIP_OLAND:
3852 		if (dce60_register_irq_handlers(dm->adev)) {
3853 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3854 			goto fail;
3855 		}
3856 		break;
3857 #endif
3858 	case CHIP_BONAIRE:
3859 	case CHIP_HAWAII:
3860 	case CHIP_KAVERI:
3861 	case CHIP_KABINI:
3862 	case CHIP_MULLINS:
3863 	case CHIP_TONGA:
3864 	case CHIP_FIJI:
3865 	case CHIP_CARRIZO:
3866 	case CHIP_STONEY:
3867 	case CHIP_POLARIS11:
3868 	case CHIP_POLARIS10:
3869 	case CHIP_POLARIS12:
3870 	case CHIP_VEGAM:
3871 	case CHIP_VEGA10:
3872 	case CHIP_VEGA12:
3873 	case CHIP_VEGA20:
3874 		if (dce110_register_irq_handlers(dm->adev)) {
3875 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3876 			goto fail;
3877 		}
3878 		break;
3879 #if defined(CONFIG_DRM_AMD_DC_DCN)
3880 	case CHIP_RAVEN:
3881 	case CHIP_NAVI12:
3882 	case CHIP_NAVI10:
3883 	case CHIP_NAVI14:
3884 	case CHIP_RENOIR:
3885 	case CHIP_SIENNA_CICHLID:
3886 	case CHIP_NAVY_FLOUNDER:
3887 	case CHIP_DIMGREY_CAVEFISH:
3888 	case CHIP_BEIGE_GOBY:
3889 	case CHIP_VANGOGH:
3890 	case CHIP_YELLOW_CARP:
3891 		if (dcn10_register_irq_handlers(dm->adev)) {
3892 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3893 			goto fail;
3894 		}
3895 		break;
3896 #endif
3897 	default:
3898 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3899 		goto fail;
3900 	}
3901 
3902 	return 0;
3903 fail:
3904 	kfree(aencoder);
3905 	kfree(aconnector);
3906 
3907 	return -EINVAL;
3908 }
3909 
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
3915 
3916 /******************************************************************************
3917  * amdgpu_display_funcs functions
3918  *****************************************************************************/
3919 
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
3927 static void dm_bandwidth_update(struct amdgpu_device *adev)
3928 {
3929 	/* TODO: implement later */
3930 }
3931 
3932 static const struct amdgpu_display_funcs dm_display_funcs = {
3933 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3934 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3935 	.backlight_set_level = NULL, /* never called for DC */
3936 	.backlight_get_level = NULL, /* never called for DC */
3937 	.hpd_sense = NULL,/* called unconditionally */
3938 	.hpd_set_polarity = NULL, /* called unconditionally */
3939 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3940 	.page_flip_get_scanoutpos =
3941 		dm_crtc_get_scanoutpos,/* called unconditionally */
3942 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3943 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3944 };
3945 
3946 #if defined(CONFIG_DEBUG_KERNEL_DC)
3947 
3948 static ssize_t s3_debug_store(struct device *device,
3949 			      struct device_attribute *attr,
3950 			      const char *buf,
3951 			      size_t count)
3952 {
3953 	int ret;
3954 	int s3_state;
3955 	struct drm_device *drm_dev = dev_get_drvdata(device);
3956 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3957 
	ret = kstrtoint(buf, 0, &s3_state);
	if (ret)
		return ret;

	if (s3_state) {
		dm_resume(adev);
		drm_kms_helper_hotplug_event(adev_to_drm(adev));
	} else {
		dm_suspend(adev);
	}

	return count;
3969 }
3970 
3971 DEVICE_ATTR_WO(s3_debug);
3972 
3973 #endif
3974 
3975 static int dm_early_init(void *handle)
3976 {
3977 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3978 
3979 	switch (adev->asic_type) {
3980 #if defined(CONFIG_DRM_AMD_DC_SI)
3981 	case CHIP_TAHITI:
3982 	case CHIP_PITCAIRN:
3983 	case CHIP_VERDE:
3984 		adev->mode_info.num_crtc = 6;
3985 		adev->mode_info.num_hpd = 6;
3986 		adev->mode_info.num_dig = 6;
3987 		break;
3988 	case CHIP_OLAND:
3989 		adev->mode_info.num_crtc = 2;
3990 		adev->mode_info.num_hpd = 2;
3991 		adev->mode_info.num_dig = 2;
3992 		break;
3993 #endif
3994 	case CHIP_BONAIRE:
3995 	case CHIP_HAWAII:
3996 		adev->mode_info.num_crtc = 6;
3997 		adev->mode_info.num_hpd = 6;
3998 		adev->mode_info.num_dig = 6;
3999 		break;
4000 	case CHIP_KAVERI:
4001 		adev->mode_info.num_crtc = 4;
4002 		adev->mode_info.num_hpd = 6;
4003 		adev->mode_info.num_dig = 7;
4004 		break;
4005 	case CHIP_KABINI:
4006 	case CHIP_MULLINS:
4007 		adev->mode_info.num_crtc = 2;
4008 		adev->mode_info.num_hpd = 6;
4009 		adev->mode_info.num_dig = 6;
4010 		break;
4011 	case CHIP_FIJI:
4012 	case CHIP_TONGA:
4013 		adev->mode_info.num_crtc = 6;
4014 		adev->mode_info.num_hpd = 6;
4015 		adev->mode_info.num_dig = 7;
4016 		break;
4017 	case CHIP_CARRIZO:
4018 		adev->mode_info.num_crtc = 3;
4019 		adev->mode_info.num_hpd = 6;
4020 		adev->mode_info.num_dig = 9;
4021 		break;
4022 	case CHIP_STONEY:
4023 		adev->mode_info.num_crtc = 2;
4024 		adev->mode_info.num_hpd = 6;
4025 		adev->mode_info.num_dig = 9;
4026 		break;
4027 	case CHIP_POLARIS11:
4028 	case CHIP_POLARIS12:
4029 		adev->mode_info.num_crtc = 5;
4030 		adev->mode_info.num_hpd = 5;
4031 		adev->mode_info.num_dig = 5;
4032 		break;
4033 	case CHIP_POLARIS10:
4034 	case CHIP_VEGAM:
4035 		adev->mode_info.num_crtc = 6;
4036 		adev->mode_info.num_hpd = 6;
4037 		adev->mode_info.num_dig = 6;
4038 		break;
4039 	case CHIP_VEGA10:
4040 	case CHIP_VEGA12:
4041 	case CHIP_VEGA20:
4042 		adev->mode_info.num_crtc = 6;
4043 		adev->mode_info.num_hpd = 6;
4044 		adev->mode_info.num_dig = 6;
4045 		break;
4046 #if defined(CONFIG_DRM_AMD_DC_DCN)
4047 	case CHIP_RAVEN:
4048 	case CHIP_RENOIR:
4049 	case CHIP_VANGOGH:
4050 		adev->mode_info.num_crtc = 4;
4051 		adev->mode_info.num_hpd = 4;
4052 		adev->mode_info.num_dig = 4;
4053 		break;
4054 	case CHIP_NAVI10:
4055 	case CHIP_NAVI12:
4056 	case CHIP_SIENNA_CICHLID:
4057 	case CHIP_NAVY_FLOUNDER:
4058 		adev->mode_info.num_crtc = 6;
4059 		adev->mode_info.num_hpd = 6;
4060 		adev->mode_info.num_dig = 6;
4061 		break;
4062 	case CHIP_YELLOW_CARP:
4063 		adev->mode_info.num_crtc = 4;
4064 		adev->mode_info.num_hpd = 4;
4065 		adev->mode_info.num_dig = 4;
4066 		break;
4067 	case CHIP_NAVI14:
4068 	case CHIP_DIMGREY_CAVEFISH:
4069 		adev->mode_info.num_crtc = 5;
4070 		adev->mode_info.num_hpd = 5;
4071 		adev->mode_info.num_dig = 5;
4072 		break;
4073 	case CHIP_BEIGE_GOBY:
4074 		adev->mode_info.num_crtc = 2;
4075 		adev->mode_info.num_hpd = 2;
4076 		adev->mode_info.num_dig = 2;
4077 		break;
4078 #endif
4079 	default:
4080 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4081 		return -EINVAL;
4082 	}
4083 
4084 	amdgpu_dm_set_irq_funcs(adev);
4085 
4086 	if (adev->mode_info.funcs == NULL)
4087 		adev->mode_info.funcs = &dm_display_funcs;
4088 
4089 	/*
4090 	 * Note: Do NOT change adev->audio_endpt_rreg and
4091 	 * adev->audio_endpt_wreg because they are initialised in
4092 	 * amdgpu_device_init()
4093 	 */
4094 #if defined(CONFIG_DEBUG_KERNEL_DC)
4095 	device_create_file(
4096 		adev_to_drm(adev)->dev,
4097 		&dev_attr_s3_debug);
4098 #endif
4099 
4100 	return 0;
4101 }
4102 
4103 static bool modeset_required(struct drm_crtc_state *crtc_state,
4104 			     struct dc_stream_state *new_stream,
4105 			     struct dc_stream_state *old_stream)
4106 {
4107 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4108 }
4109 
4110 static bool modereset_required(struct drm_crtc_state *crtc_state)
4111 {
4112 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4113 }
4114 
4115 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4116 {
4117 	drm_encoder_cleanup(encoder);
4118 	kfree(encoder);
4119 }
4120 
4121 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4122 	.destroy = amdgpu_dm_encoder_destroy,
4123 };
4124 
4126 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4127 					 struct drm_framebuffer *fb,
4128 					 int *min_downscale, int *max_upscale)
4129 {
4130 	struct amdgpu_device *adev = drm_to_adev(dev);
4131 	struct dc *dc = adev->dm.dc;
4132 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4133 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4134 
4135 	switch (fb->format->format) {
4136 	case DRM_FORMAT_P010:
4137 	case DRM_FORMAT_NV12:
4138 	case DRM_FORMAT_NV21:
4139 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4140 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4141 		break;
4142 
4143 	case DRM_FORMAT_XRGB16161616F:
4144 	case DRM_FORMAT_ARGB16161616F:
4145 	case DRM_FORMAT_XBGR16161616F:
4146 	case DRM_FORMAT_ABGR16161616F:
4147 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4148 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4149 		break;
4150 
4151 	default:
4152 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4153 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4154 		break;
4155 	}
4156 
	/*
	 * A factor of 1 in the plane_cap means the plane cannot be scaled,
	 * i.e. only a scaling factor of 1.0 == 1000 units is allowed.
	 */
4161 	if (*max_upscale == 1)
4162 		*max_upscale = 1000;
4163 
4164 	if (*min_downscale == 1)
4165 		*min_downscale = 1000;
4166 }
4168 
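/*
 * Convert the DRM plane src/dst rectangles into dc_scaling_info and
 * validate the implied scale factors against the per-format plane caps.
 */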
4169 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4170 				struct dc_scaling_info *scaling_info)
4171 {
4172 	int scale_w, scale_h, min_downscale, max_upscale;
4173 
4174 	memset(scaling_info, 0, sizeof(*scaling_info));
4175 
	/* Source is 16.16 fixed point; ignore the fractional part for now. */
4177 	scaling_info->src_rect.x = state->src_x >> 16;
4178 	scaling_info->src_rect.y = state->src_y >> 16;
4179 
4180 	/*
4181 	 * For reasons we don't (yet) fully understand a non-zero
4182 	 * src_y coordinate into an NV12 buffer can cause a
4183 	 * system hang. To avoid hangs (and maybe be overly cautious)
4184 	 * let's reject both non-zero src_x and src_y.
4185 	 *
4186 	 * We currently know of only one use-case to reproduce a
4187 	 * scenario with non-zero src_x and src_y for NV12, which
4188 	 * is to gesture the YouTube Android app into full screen
4189 	 * on ChromeOS.
4190 	 */
4191 	if (state->fb &&
4192 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4193 	    (scaling_info->src_rect.x != 0 ||
4194 	     scaling_info->src_rect.y != 0))
4195 		return -EINVAL;
4196 
4197 	scaling_info->src_rect.width = state->src_w >> 16;
4198 	if (scaling_info->src_rect.width == 0)
4199 		return -EINVAL;
4200 
4201 	scaling_info->src_rect.height = state->src_h >> 16;
4202 	if (scaling_info->src_rect.height == 0)
4203 		return -EINVAL;
4204 
4205 	scaling_info->dst_rect.x = state->crtc_x;
4206 	scaling_info->dst_rect.y = state->crtc_y;
4207 
4208 	if (state->crtc_w == 0)
4209 		return -EINVAL;
4210 
4211 	scaling_info->dst_rect.width = state->crtc_w;
4212 
4213 	if (state->crtc_h == 0)
4214 		return -EINVAL;
4215 
4216 	scaling_info->dst_rect.height = state->crtc_h;
4217 
4218 	/* DRM doesn't specify clipping on destination output. */
4219 	scaling_info->clip_rect = scaling_info->dst_rect;
4220 
4221 	/* Validate scaling per-format with DC plane caps */
4222 	if (state->plane && state->plane->dev && state->fb) {
4223 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4224 					     &min_downscale, &max_upscale);
4225 	} else {
4226 		min_downscale = 250;
4227 		max_upscale = 16000;
4228 	}
4229 
4230 	scale_w = scaling_info->dst_rect.width * 1000 /
4231 		  scaling_info->src_rect.width;
4232 
4233 	if (scale_w < min_downscale || scale_w > max_upscale)
4234 		return -EINVAL;
4235 
4236 	scale_h = scaling_info->dst_rect.height * 1000 /
4237 		  scaling_info->src_rect.height;
4238 
4239 	if (scale_h < min_downscale || scale_h > max_upscale)
4240 		return -EINVAL;
4241 
	/*
	 * The "scaling_quality" can be ignored for now: with quality = 0,
	 * DC assumes reasonable defaults based on the format.
	 */
4246 
4247 	return 0;
4248 }
4249 
4250 static void
4251 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4252 				 uint64_t tiling_flags)
4253 {
4254 	/* Fill GFX8 params */
4255 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4256 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4257 
4258 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4259 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4260 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4261 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4262 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4263 
4264 		/* XXX fix me for VI */
4265 		tiling_info->gfx8.num_banks = num_banks;
4266 		tiling_info->gfx8.array_mode =
4267 				DC_ARRAY_2D_TILED_THIN1;
4268 		tiling_info->gfx8.tile_split = tile_split;
4269 		tiling_info->gfx8.bank_width = bankw;
4270 		tiling_info->gfx8.bank_height = bankh;
4271 		tiling_info->gfx8.tile_aspect = mtaspect;
4272 		tiling_info->gfx8.tile_mode =
4273 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4274 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4275 			== DC_ARRAY_1D_TILED_THIN1) {
4276 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4277 	}
4278 
4279 	tiling_info->gfx8.pipe_config =
4280 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4281 }
4282 
4283 static void
4284 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4285 				  union dc_tiling_info *tiling_info)
4286 {
4287 	tiling_info->gfx9.num_pipes =
4288 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4289 	tiling_info->gfx9.num_banks =
4290 		adev->gfx.config.gb_addr_config_fields.num_banks;
4291 	tiling_info->gfx9.pipe_interleave =
4292 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4293 	tiling_info->gfx9.num_shader_engines =
4294 		adev->gfx.config.gb_addr_config_fields.num_se;
4295 	tiling_info->gfx9.max_compressed_frags =
4296 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4297 	tiling_info->gfx9.num_rb_per_se =
4298 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4299 	tiling_info->gfx9.shaderEnable = 1;
4300 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4301 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4302 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4303 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4304 	    adev->asic_type == CHIP_YELLOW_CARP ||
4305 	    adev->asic_type == CHIP_VANGOGH)
4306 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4307 }
4308 
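/*
 * Ask DC whether the DCC parameters implied by the framebuffer are
 * supported for this format, rotation and swizzle mode.
 */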
4309 static int
4310 validate_dcc(struct amdgpu_device *adev,
4311 	     const enum surface_pixel_format format,
4312 	     const enum dc_rotation_angle rotation,
4313 	     const union dc_tiling_info *tiling_info,
4314 	     const struct dc_plane_dcc_param *dcc,
4315 	     const struct dc_plane_address *address,
4316 	     const struct plane_size *plane_size)
4317 {
4318 	struct dc *dc = adev->dm.dc;
4319 	struct dc_dcc_surface_param input;
4320 	struct dc_surface_dcc_cap output;
4321 
4322 	memset(&input, 0, sizeof(input));
4323 	memset(&output, 0, sizeof(output));
4324 
4325 	if (!dcc->enable)
4326 		return 0;
4327 
4328 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4329 	    !dc->cap_funcs.get_dcc_compression_cap)
4330 		return -EINVAL;
4331 
4332 	input.format = format;
4333 	input.surface_size.width = plane_size->surface_size.width;
4334 	input.surface_size.height = plane_size->surface_size.height;
4335 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4336 
4337 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4338 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4339 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4340 		input.scan = SCAN_DIRECTION_VERTICAL;
4341 
4342 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4343 		return -EINVAL;
4344 
4345 	if (!output.capable)
4346 		return -EINVAL;
4347 
4348 	if (dcc->independent_64b_blks == 0 &&
4349 	    output.grph.rgb.independent_64b_blks != 0)
4350 		return -EINVAL;
4351 
4352 	return 0;
4353 }
4354 
4355 static bool
4356 modifier_has_dcc(uint64_t modifier)
4357 {
4358 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4359 }
4360 
static unsigned int
modifier_gfx9_swizzle_mode(uint64_t modifier)
4363 {
4364 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4365 		return 0;
4366 
4367 	return AMD_FMT_MOD_GET(TILE, modifier);
4368 }
4369 
4370 static const struct drm_format_info *
4371 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4372 {
4373 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4374 }
4375 
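/*
 * Derive GFX9+ tiling parameters from an AMD format modifier, starting
 * from the device defaults and overriding the pipe/bank/packer counts
 * encoded in the modifier itself.
 */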
4376 static void
4377 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4378 				    union dc_tiling_info *tiling_info,
4379 				    uint64_t modifier)
4380 {
4381 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4382 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4383 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4384 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4385 
4386 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4387 
4388 	if (!IS_AMD_FMT_MOD(modifier))
4389 		return;
4390 
4391 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4392 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4393 
4394 	if (adev->family >= AMDGPU_FAMILY_NV) {
4395 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4396 	} else {
4397 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4398 
4399 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4400 	}
4401 }
4402 
4403 enum dm_micro_swizzle {
4404 	MICRO_SWIZZLE_Z = 0,
4405 	MICRO_SWIZZLE_S = 1,
4406 	MICRO_SWIZZLE_D = 2,
4407 	MICRO_SWIZZLE_R = 3
4408 };
4409 
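/*
 * format_mod_supported hook: LINEAR and INVALID are always accepted;
 * everything else must be on the plane's modifier list and satisfy the
 * per-family swizzle, bpp and DCC constraints checked below.
 */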
4410 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4411 					  uint32_t format,
4412 					  uint64_t modifier)
4413 {
4414 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4415 	const struct drm_format_info *info = drm_format_info(format);
4416 	int i;
4417 
4418 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4419 
4420 	if (!info)
4421 		return false;
4422 
4423 	/*
4424 	 * We always have to allow these modifiers:
4425 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4426 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4427 	 */
4428 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4429 	    modifier == DRM_FORMAT_MOD_INVALID) {
4430 		return true;
4431 	}
4432 
4433 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4434 	for (i = 0; i < plane->modifier_count; i++) {
4435 		if (modifier == plane->modifiers[i])
4436 			break;
4437 	}
4438 	if (i == plane->modifier_count)
4439 		return false;
4440 
4441 	/*
4442 	 * For D swizzle the canonical modifier depends on the bpp, so check
4443 	 * it here.
4444 	 */
4445 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4446 	    adev->family >= AMDGPU_FAMILY_NV) {
4447 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4448 			return false;
4449 	}
4450 
4451 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4452 	    info->cpp[0] < 8)
4453 		return false;
4454 
4455 	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments, 16/64 bpp are more complicated. */
		if (info->cpp[0] != 4)
			return false;
		/*
		 * We support multi-planar formats, but not when combined
		 * with additional DCC metadata planes.
		 */
		if (info->num_planes > 1)
			return false;
4463 	}
4464 
4465 	return true;
4466 }
4467 
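/*
 * Append one modifier to the dynamically grown list, doubling the backing
 * allocation when full. On allocation failure the list is freed and *mods
 * set to NULL, which callers treat as -ENOMEM.
 */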
4468 static void
4469 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4470 {
4471 	if (!*mods)
4472 		return;
4473 
4474 	if (*cap - *size < 1) {
4475 		uint64_t new_cap = *cap * 2;
4476 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4477 
4478 		if (!new_mods) {
4479 			kfree(*mods);
4480 			*mods = NULL;
4481 			return;
4482 		}
4483 
4484 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4485 		kfree(*mods);
4486 		*mods = new_mods;
4487 		*cap = new_cap;
4488 	}
4489 
4490 	(*mods)[*size] = mod;
4491 	*size += 1;
4492 }
4493 
4494 static void
4495 add_gfx9_modifiers(const struct amdgpu_device *adev,
4496 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4497 {
4498 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4499 	int pipe_xor_bits = min(8, pipes +
4500 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4501 	int bank_xor_bits = min(8 - pipe_xor_bits,
4502 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4503 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4504 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4505 
4507 	if (adev->family == AMDGPU_FAMILY_RV) {
4508 		/* Raven2 and later */
4509 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4510 
4511 		/*
4512 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4513 		 * doesn't support _D on DCN
4514 		 */
4515 
4516 		if (has_constant_encode) {
4517 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4518 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4519 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4520 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4521 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4522 				    AMD_FMT_MOD_SET(DCC, 1) |
4523 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4524 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4525 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4526 		}
4527 
4528 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4529 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4530 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4531 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4532 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4533 			    AMD_FMT_MOD_SET(DCC, 1) |
4534 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4535 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4536 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4537 
4538 		if (has_constant_encode) {
4539 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4540 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4541 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4542 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4543 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4544 				    AMD_FMT_MOD_SET(DCC, 1) |
4545 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4546 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4547 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4549 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4550 				    AMD_FMT_MOD_SET(RB, rb) |
4551 				    AMD_FMT_MOD_SET(PIPE, pipes));
4552 		}
4553 
4554 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4555 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4556 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4557 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4558 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4559 			    AMD_FMT_MOD_SET(DCC, 1) |
4560 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4561 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4562 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4563 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4564 			    AMD_FMT_MOD_SET(RB, rb) |
4565 			    AMD_FMT_MOD_SET(PIPE, pipes));
4566 	}
4567 
4568 	/*
4569 	 * Only supported for 64bpp on Raven, will be filtered on format in
4570 	 * dm_plane_format_mod_supported.
4571 	 */
4572 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4573 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4574 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4575 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4576 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4577 
4578 	if (adev->family == AMDGPU_FAMILY_RV) {
4579 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4580 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4581 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4582 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4583 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4584 	}
4585 
4586 	/*
4587 	 * Only supported for 64bpp on Raven, will be filtered on format in
4588 	 * dm_plane_format_mod_supported.
4589 	 */
4590 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4591 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4592 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4593 
4594 	if (adev->family == AMDGPU_FAMILY_RV) {
4595 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4596 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4597 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4598 	}
4599 }
4600 
4601 static void
4602 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4603 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4604 {
4605 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4606 
4607 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4608 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4609 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4610 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4611 		    AMD_FMT_MOD_SET(DCC, 1) |
4612 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4613 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4614 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4615 
4616 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4617 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4618 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4619 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4620 		    AMD_FMT_MOD_SET(DCC, 1) |
4621 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4622 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4623 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4624 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4625 
4626 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4627 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4628 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4629 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4630 
4631 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4632 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4633 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4634 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4635 
4637 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4638 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4639 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4640 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4641 
4642 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4643 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4644 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4645 }
4646 
4647 static void
4648 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4649 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4650 {
4651 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4652 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4653 
4654 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4655 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4656 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4657 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4658 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4659 		    AMD_FMT_MOD_SET(DCC, 1) |
4660 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4661 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4662 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4663 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4664 
4665 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4666 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4667 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4668 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4669 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4670 		    AMD_FMT_MOD_SET(DCC, 1) |
4671 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4672 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4673 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4674 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4675 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4676 
4677 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4678 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4679 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4680 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4681 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4682 
4683 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4684 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4685 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4686 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4687 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4688 
4689 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4690 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4691 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4692 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4693 
4694 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4695 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4696 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4697 }
4698 
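/*
 * Build the modifier list advertised for a plane: cursors only get LINEAR,
 * other planes get the GFX9/GFX10 swizzles for the ASIC family, and the
 * list is always terminated with DRM_FORMAT_MOD_INVALID.
 */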
4699 static int
4700 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4701 {
	uint64_t size = 0, capacity = 128;

	*mods = NULL;
4704 
4705 	/* We have not hooked up any pre-GFX9 modifiers. */
4706 	if (adev->family < AMDGPU_FAMILY_AI)
4707 		return 0;
4708 
4709 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4710 
4711 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4712 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4713 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4714 		return *mods ? 0 : -ENOMEM;
4715 	}
4716 
4717 	switch (adev->family) {
4718 	case AMDGPU_FAMILY_AI:
4719 	case AMDGPU_FAMILY_RV:
4720 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4721 		break;
4722 	case AMDGPU_FAMILY_NV:
4723 	case AMDGPU_FAMILY_VGH:
4724 	case AMDGPU_FAMILY_YC:
4725 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4726 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4727 		else
4728 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4729 		break;
4730 	}
4731 
4732 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4733 
4734 	/* INVALID marks the end of the list. */
4735 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4736 
4737 	if (!*mods)
4738 		return -ENOMEM;
4739 
4740 	return 0;
4741 }
4742 
4743 static int
4744 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4745 					  const struct amdgpu_framebuffer *afb,
4746 					  const enum surface_pixel_format format,
4747 					  const enum dc_rotation_angle rotation,
4748 					  const struct plane_size *plane_size,
4749 					  union dc_tiling_info *tiling_info,
4750 					  struct dc_plane_dcc_param *dcc,
4751 					  struct dc_plane_address *address,
4752 					  const bool force_disable_dcc)
4753 {
4754 	const uint64_t modifier = afb->base.modifier;
4755 	int ret = 0;
4756 
4757 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4758 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4759 
4760 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4761 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4762 
4763 		dcc->enable = 1;
4764 		dcc->meta_pitch = afb->base.pitches[1];
4765 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4766 
4767 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4768 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4769 	}
4770 
4771 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4772 	if (ret)
4773 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
4774 
4775 	return ret;
4776 }
4777 
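/*
 * Fill DC tiling, plane size, DCC and address state for a framebuffer.
 * GFX9 and newer derive tiling from the format modifier; older ASICs use
 * the legacy tiling flags.
 */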
4778 static int
4779 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4780 			     const struct amdgpu_framebuffer *afb,
4781 			     const enum surface_pixel_format format,
4782 			     const enum dc_rotation_angle rotation,
4783 			     const uint64_t tiling_flags,
4784 			     union dc_tiling_info *tiling_info,
4785 			     struct plane_size *plane_size,
4786 			     struct dc_plane_dcc_param *dcc,
4787 			     struct dc_plane_address *address,
4788 			     bool tmz_surface,
4789 			     bool force_disable_dcc)
4790 {
4791 	const struct drm_framebuffer *fb = &afb->base;
4792 	int ret;
4793 
4794 	memset(tiling_info, 0, sizeof(*tiling_info));
4795 	memset(plane_size, 0, sizeof(*plane_size));
4796 	memset(dcc, 0, sizeof(*dcc));
4797 	memset(address, 0, sizeof(*address));
4798 
4799 	address->tmz_surface = tmz_surface;
4800 
4801 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4802 		uint64_t addr = afb->address + fb->offsets[0];
4803 
4804 		plane_size->surface_size.x = 0;
4805 		plane_size->surface_size.y = 0;
4806 		plane_size->surface_size.width = fb->width;
4807 		plane_size->surface_size.height = fb->height;
4808 		plane_size->surface_pitch =
4809 			fb->pitches[0] / fb->format->cpp[0];
4810 
4811 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4812 		address->grph.addr.low_part = lower_32_bits(addr);
4813 		address->grph.addr.high_part = upper_32_bits(addr);
4814 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4815 		uint64_t luma_addr = afb->address + fb->offsets[0];
4816 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4817 
4818 		plane_size->surface_size.x = 0;
4819 		plane_size->surface_size.y = 0;
4820 		plane_size->surface_size.width = fb->width;
4821 		plane_size->surface_size.height = fb->height;
4822 		plane_size->surface_pitch =
4823 			fb->pitches[0] / fb->format->cpp[0];
4824 
4825 		plane_size->chroma_size.x = 0;
4826 		plane_size->chroma_size.y = 0;
4827 		/* TODO: set these based on surface format */
4828 		plane_size->chroma_size.width = fb->width / 2;
4829 		plane_size->chroma_size.height = fb->height / 2;
4830 
4831 		plane_size->chroma_pitch =
4832 			fb->pitches[1] / fb->format->cpp[1];
4833 
4834 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4835 		address->video_progressive.luma_addr.low_part =
4836 			lower_32_bits(luma_addr);
4837 		address->video_progressive.luma_addr.high_part =
4838 			upper_32_bits(luma_addr);
4839 		address->video_progressive.chroma_addr.low_part =
4840 			lower_32_bits(chroma_addr);
4841 		address->video_progressive.chroma_addr.high_part =
4842 			upper_32_bits(chroma_addr);
4843 	}
4844 
4845 	if (adev->family >= AMDGPU_FAMILY_AI) {
4846 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4847 								rotation, plane_size,
4848 								tiling_info, dcc,
4849 								address,
4850 								force_disable_dcc);
4851 		if (ret)
4852 			return ret;
4853 	} else {
4854 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4855 	}
4856 
4857 	return 0;
4858 }
4859 
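/*
 * Translate the DRM alpha properties into DC blending state. Only overlay
 * planes participate; per-pixel alpha additionally requires a premultiplied
 * blend mode and a format with an alpha channel.
 */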
4860 static void
4861 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4862 			       bool *per_pixel_alpha, bool *global_alpha,
4863 			       int *global_alpha_value)
4864 {
4865 	*per_pixel_alpha = false;
4866 	*global_alpha = false;
4867 	*global_alpha_value = 0xff;
4868 
4869 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4870 		return;
4871 
4872 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4873 		static const uint32_t alpha_formats[] = {
4874 			DRM_FORMAT_ARGB8888,
4875 			DRM_FORMAT_RGBA8888,
4876 			DRM_FORMAT_ABGR8888,
4877 		};
4878 		uint32_t format = plane_state->fb->format->format;
4879 		unsigned int i;
4880 
4881 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4882 			if (format == alpha_formats[i]) {
4883 				*per_pixel_alpha = true;
4884 				break;
4885 			}
4886 		}
4887 	}
4888 
4889 	if (plane_state->alpha < 0xffff) {
4890 		*global_alpha = true;
4891 		*global_alpha_value = plane_state->alpha >> 8;
4892 	}
4893 }
4894 
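/*
 * Pick the DC color space from the DRM color encoding/range properties.
 * RGB formats are unaffected, and BT.2020 is only supported as full range.
 */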
4895 static int
4896 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4897 			    const enum surface_pixel_format format,
4898 			    enum dc_color_space *color_space)
4899 {
4900 	bool full_range;
4901 
4902 	*color_space = COLOR_SPACE_SRGB;
4903 
4904 	/* DRM color properties only affect non-RGB formats. */
4905 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4906 		return 0;
4907 
4908 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4909 
4910 	switch (plane_state->color_encoding) {
4911 	case DRM_COLOR_YCBCR_BT601:
4912 		if (full_range)
4913 			*color_space = COLOR_SPACE_YCBCR601;
4914 		else
4915 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4916 		break;
4917 
4918 	case DRM_COLOR_YCBCR_BT709:
4919 		if (full_range)
4920 			*color_space = COLOR_SPACE_YCBCR709;
4921 		else
4922 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4923 		break;
4924 
4925 	case DRM_COLOR_YCBCR_BT2020:
4926 		if (full_range)
4927 			*color_space = COLOR_SPACE_2020_YCBCR;
4928 		else
4929 			return -EINVAL;
4930 		break;
4931 
4932 	default:
4933 		return -EINVAL;
4934 	}
4935 
4936 	return 0;
4937 }
4938 
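/*
 * Translate a DRM plane state into a full dc_plane_info: pixel format,
 * rotation, tiling, DCC, blending and the surface addresses.
 */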
4939 static int
4940 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4941 			    const struct drm_plane_state *plane_state,
4942 			    const uint64_t tiling_flags,
4943 			    struct dc_plane_info *plane_info,
4944 			    struct dc_plane_address *address,
4945 			    bool tmz_surface,
4946 			    bool force_disable_dcc)
4947 {
4948 	const struct drm_framebuffer *fb = plane_state->fb;
4949 	const struct amdgpu_framebuffer *afb =
4950 		to_amdgpu_framebuffer(plane_state->fb);
4951 	int ret;
4952 
4953 	memset(plane_info, 0, sizeof(*plane_info));
4954 
4955 	switch (fb->format->format) {
4956 	case DRM_FORMAT_C8:
4957 		plane_info->format =
4958 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4959 		break;
4960 	case DRM_FORMAT_RGB565:
4961 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4962 		break;
4963 	case DRM_FORMAT_XRGB8888:
4964 	case DRM_FORMAT_ARGB8888:
4965 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4966 		break;
4967 	case DRM_FORMAT_XRGB2101010:
4968 	case DRM_FORMAT_ARGB2101010:
4969 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4970 		break;
4971 	case DRM_FORMAT_XBGR2101010:
4972 	case DRM_FORMAT_ABGR2101010:
4973 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4974 		break;
4975 	case DRM_FORMAT_XBGR8888:
4976 	case DRM_FORMAT_ABGR8888:
4977 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4978 		break;
4979 	case DRM_FORMAT_NV21:
4980 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4981 		break;
4982 	case DRM_FORMAT_NV12:
4983 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4984 		break;
4985 	case DRM_FORMAT_P010:
4986 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4987 		break;
4988 	case DRM_FORMAT_XRGB16161616F:
4989 	case DRM_FORMAT_ARGB16161616F:
4990 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4991 		break;
4992 	case DRM_FORMAT_XBGR16161616F:
4993 	case DRM_FORMAT_ABGR16161616F:
4994 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4995 		break;
4996 	case DRM_FORMAT_XRGB16161616:
4997 	case DRM_FORMAT_ARGB16161616:
4998 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4999 		break;
5000 	case DRM_FORMAT_XBGR16161616:
5001 	case DRM_FORMAT_ABGR16161616:
5002 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5003 		break;
5004 	default:
5005 		DRM_ERROR(
5006 			"Unsupported screen format %p4cc\n",
5007 			&fb->format->format);
5008 		return -EINVAL;
5009 	}
5010 
5011 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5012 	case DRM_MODE_ROTATE_0:
5013 		plane_info->rotation = ROTATION_ANGLE_0;
5014 		break;
5015 	case DRM_MODE_ROTATE_90:
5016 		plane_info->rotation = ROTATION_ANGLE_90;
5017 		break;
5018 	case DRM_MODE_ROTATE_180:
5019 		plane_info->rotation = ROTATION_ANGLE_180;
5020 		break;
5021 	case DRM_MODE_ROTATE_270:
5022 		plane_info->rotation = ROTATION_ANGLE_270;
5023 		break;
5024 	default:
5025 		plane_info->rotation = ROTATION_ANGLE_0;
5026 		break;
5027 	}
5028 
5029 	plane_info->visible = true;
5030 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5031 
5032 	plane_info->layer_index = 0;
5033 
5034 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5035 					  &plane_info->color_space);
5036 	if (ret)
5037 		return ret;
5038 
5039 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5040 					   plane_info->rotation, tiling_flags,
5041 					   &plane_info->tiling_info,
5042 					   &plane_info->plane_size,
5043 					   &plane_info->dcc, address, tmz_surface,
5044 					   force_disable_dcc);
5045 	if (ret)
5046 		return ret;
5047 
5048 	fill_blending_from_plane_state(
5049 		plane_state, &plane_info->per_pixel_alpha,
5050 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5051 
5052 	return 0;
5053 }
5054 
5055 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5056 				    struct dc_plane_state *dc_plane_state,
5057 				    struct drm_plane_state *plane_state,
5058 				    struct drm_crtc_state *crtc_state)
5059 {
5060 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5061 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5062 	struct dc_scaling_info scaling_info;
5063 	struct dc_plane_info plane_info;
5064 	int ret;
5065 	bool force_disable_dcc = false;
5066 
5067 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5068 	if (ret)
5069 		return ret;
5070 
5071 	dc_plane_state->src_rect = scaling_info.src_rect;
5072 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5073 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5074 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5075 
5076 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5077 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5078 					  afb->tiling_flags,
5079 					  &plane_info,
5080 					  &dc_plane_state->address,
5081 					  afb->tmz_surface,
5082 					  force_disable_dcc);
5083 	if (ret)
5084 		return ret;
5085 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5089 	dc_plane_state->plane_size = plane_info.plane_size;
5090 	dc_plane_state->rotation = plane_info.rotation;
5091 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5092 	dc_plane_state->stereo_format = plane_info.stereo_format;
5093 	dc_plane_state->tiling_info = plane_info.tiling_info;
5094 	dc_plane_state->visible = plane_info.visible;
5095 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5096 	dc_plane_state->global_alpha = plane_info.global_alpha;
5097 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5098 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5100 	dc_plane_state->flip_int_enabled = true;
5101 
5102 	/*
5103 	 * Always set input transfer function, since plane state is refreshed
5104 	 * every time.
5105 	 */
5106 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5107 	if (ret)
5108 		return ret;
5109 
5110 	return 0;
5111 }
5112 
5113 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5114 					   const struct dm_connector_state *dm_state,
5115 					   struct dc_stream_state *stream)
5116 {
5117 	enum amdgpu_rmx_type rmx_type;
5118 
	struct rect src = { 0 }; /* viewport in composition space */
5120 	struct rect dst = { 0 }; /* stream addressable area */
5121 
	/* No mode: nothing to be done */
5123 	if (!mode)
5124 		return;
5125 
5126 	/* Full screen scaling by default */
5127 	src.width = mode->hdisplay;
5128 	src.height = mode->vdisplay;
5129 	dst.width = stream->timing.h_addressable;
5130 	dst.height = stream->timing.v_addressable;
5131 
5132 	if (dm_state) {
5133 		rmx_type = dm_state->scaling;
5134 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
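			/*
			 * Compare aspect ratios by cross-multiplication to
			 * stay in integer math: src.w/src.h < dst.w/dst.h
			 * iff src.w * dst.h < src.h * dst.w.
			 */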
5135 			if (src.width * dst.height <
5136 					src.height * dst.width) {
5137 				/* height needs less upscaling/more downscaling */
5138 				dst.width = src.width *
5139 						dst.height / src.height;
5140 			} else {
5141 				/* width needs less upscaling/more downscaling */
5142 				dst.height = src.height *
5143 						dst.width / src.width;
5144 			}
5145 		} else if (rmx_type == RMX_CENTER) {
5146 			dst = src;
5147 		}
5148 
5149 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5150 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5151 
5152 		if (dm_state->underscan_enable) {
5153 			dst.x += dm_state->underscan_hborder / 2;
5154 			dst.y += dm_state->underscan_vborder / 2;
5155 			dst.width -= dm_state->underscan_hborder;
5156 			dst.height -= dm_state->underscan_vborder;
5157 		}
5158 	}
5159 
5160 	stream->src = src;
5161 	stream->dst = dst;
5162 
5163 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}
5167 
5168 static enum dc_color_depth
5169 convert_color_depth_from_display_info(const struct drm_connector *connector,
5170 				      bool is_y420, int requested_bpc)
5171 {
5172 	uint8_t bpc;
5173 
5174 	if (is_y420) {
5175 		bpc = 8;
5176 
5177 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5178 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5179 			bpc = 16;
5180 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5181 			bpc = 12;
5182 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5183 			bpc = 10;
5184 	} else {
5185 		bpc = (uint8_t)connector->display_info.bpc;
5186 		/* Assume 8 bpc by default if no bpc is specified. */
5187 		bpc = bpc ? bpc : 8;
5188 	}
5189 
5190 	if (requested_bpc > 0) {
5191 		/*
5192 		 * Cap display bpc based on the user requested value.
5193 		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state,
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
5198 		 */
5199 		bpc = min_t(u8, bpc, requested_bpc);
5200 
5201 		/* Round down to the nearest even number. */
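		/* (Every DC colour depth below corresponds to an even bpc.) */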
5202 		bpc = bpc - (bpc & 1);
5203 	}
5204 
5205 	switch (bpc) {
5206 	case 0:
5207 		/*
5208 		 * Temporary Work around, DRM doesn't parse color depth for
5209 		 * EDID revision before 1.4
5210 		 * TODO: Fix edid parsing
5211 		 */
5212 		return COLOR_DEPTH_888;
5213 	case 6:
5214 		return COLOR_DEPTH_666;
5215 	case 8:
5216 		return COLOR_DEPTH_888;
5217 	case 10:
5218 		return COLOR_DEPTH_101010;
5219 	case 12:
5220 		return COLOR_DEPTH_121212;
5221 	case 14:
5222 		return COLOR_DEPTH_141414;
5223 	case 16:
5224 		return COLOR_DEPTH_161616;
5225 	default:
5226 		return COLOR_DEPTH_UNDEFINED;
5227 	}
5228 }
5229 
5230 static enum dc_aspect_ratio
5231 get_aspect_ratio(const struct drm_display_mode *mode_in)
5232 {
5233 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5234 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5235 }
5236 
5237 static enum dc_color_space
5238 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5239 {
5240 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5241 
5242 	switch (dc_crtc_timing->pixel_encoding)	{
5243 	case PIXEL_ENCODING_YCBCR422:
5244 	case PIXEL_ENCODING_YCBCR444:
5245 	case PIXEL_ENCODING_YCBCR420:
5246 	{
5247 		/*
5248 		 * 27030khz is the separation point between HDTV and SDTV
5249 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5250 		 * respectively
5251 		 */
5252 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5253 			if (dc_crtc_timing->flags.Y_ONLY)
5254 				color_space =
5255 					COLOR_SPACE_YCBCR709_LIMITED;
5256 			else
5257 				color_space = COLOR_SPACE_YCBCR709;
5258 		} else {
5259 			if (dc_crtc_timing->flags.Y_ONLY)
5260 				color_space =
5261 					COLOR_SPACE_YCBCR601_LIMITED;
5262 			else
5263 				color_space = COLOR_SPACE_YCBCR601;
5264 		}
5265 
5266 	}
5267 	break;
5268 	case PIXEL_ENCODING_RGB:
5269 		color_space = COLOR_SPACE_SRGB;
5270 		break;
5271 
5272 	default:
5273 		WARN_ON(1);
5274 		break;
5275 	}
5276 
5277 	return color_space;
5278 }
5279 
5280 static bool adjust_colour_depth_from_display_info(
5281 	struct dc_crtc_timing *timing_out,
5282 	const struct drm_display_info *info)
5283 {
5284 	enum dc_color_depth depth = timing_out->display_color_depth;
5285 	int normalized_clk;
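	/*
	 * normalized_clk is in kHz (pix_clk_100hz is in 100 Hz units),
	 * matching the units of info->max_tmds_clock.
	 */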
5286 	do {
5287 		normalized_clk = timing_out->pix_clk_100hz / 10;
5288 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5289 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5290 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on colour depth. */
5292 		switch (depth) {
5293 		case COLOR_DEPTH_888:
5294 			break;
5295 		case COLOR_DEPTH_101010:
5296 			normalized_clk = (normalized_clk * 30) / 24;
5297 			break;
5298 		case COLOR_DEPTH_121212:
5299 			normalized_clk = (normalized_clk * 36) / 24;
5300 			break;
5301 		case COLOR_DEPTH_161616:
5302 			normalized_clk = (normalized_clk * 48) / 24;
5303 			break;
5304 		default:
5305 			/* The above depths are the only ones valid for HDMI. */
5306 			return false;
5307 		}
5308 		if (normalized_clk <= info->max_tmds_clock) {
5309 			timing_out->display_color_depth = depth;
5310 			return true;
5311 		}
5312 	} while (--depth > COLOR_DEPTH_666);
5313 	return false;
5314 }
5315 
5316 static void fill_stream_properties_from_drm_display_mode(
5317 	struct dc_stream_state *stream,
5318 	const struct drm_display_mode *mode_in,
5319 	const struct drm_connector *connector,
5320 	const struct drm_connector_state *connector_state,
5321 	const struct dc_stream_state *old_stream,
5322 	int requested_bpc)
5323 {
5324 	struct dc_crtc_timing *timing_out = &stream->timing;
5325 	const struct drm_display_info *info = &connector->display_info;
5326 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5327 	struct hdmi_vendor_infoframe hv_frame;
5328 	struct hdmi_avi_infoframe avi_frame;
5329 
5330 	memset(&hv_frame, 0, sizeof(hv_frame));
5331 	memset(&avi_frame, 0, sizeof(avi_frame));
5332 
5333 	timing_out->h_border_left = 0;
5334 	timing_out->h_border_right = 0;
5335 	timing_out->v_border_top = 0;
5336 	timing_out->v_border_bottom = 0;
5337 	/* TODO: un-hardcode */
5338 	if (drm_mode_is_420_only(info, mode_in)
5339 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5340 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5341 	else if (drm_mode_is_420_also(info, mode_in)
5342 			&& aconnector->force_yuv420_output)
5343 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5344 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5345 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5346 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5347 	else
5348 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5349 
5350 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5351 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5352 		connector,
5353 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5354 		requested_bpc);
5355 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5356 	timing_out->hdmi_vic = 0;
5357 
	if (old_stream) {
5359 		timing_out->vic = old_stream->timing.vic;
5360 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5361 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5362 	} else {
5363 		timing_out->vic = drm_match_cea_mode(mode_in);
5364 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5365 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5366 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5367 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5368 	}
5369 
5370 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5371 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5372 		timing_out->vic = avi_frame.video_code;
5373 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5374 		timing_out->hdmi_vic = hv_frame.vic;
5375 	}
5376 
5377 	if (is_freesync_video_mode(mode_in, aconnector)) {
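		/*
		 * For freesync video modes, use the raw mode timings rather
		 * than the crtc_-adjusted ones derived from the base mode.
		 */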
5378 		timing_out->h_addressable = mode_in->hdisplay;
5379 		timing_out->h_total = mode_in->htotal;
5380 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5381 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5382 		timing_out->v_total = mode_in->vtotal;
5383 		timing_out->v_addressable = mode_in->vdisplay;
5384 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5385 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5386 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5387 	} else {
5388 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5389 		timing_out->h_total = mode_in->crtc_htotal;
5390 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5391 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5392 		timing_out->v_total = mode_in->crtc_vtotal;
5393 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5394 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5395 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5396 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5397 	}
5398 
5399 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5400 
5401 	stream->output_color_space = get_output_color_space(timing_out);
5402 
5403 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5404 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5405 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5406 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5407 		    drm_mode_is_420_also(info, mode_in) &&
5408 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5409 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5410 			adjust_colour_depth_from_display_info(timing_out, info);
5411 		}
5412 	}
5413 }
5414 
5415 static void fill_audio_info(struct audio_info *audio_info,
5416 			    const struct drm_connector *drm_connector,
5417 			    const struct dc_sink *dc_sink)
5418 {
5419 	int i = 0;
5420 	int cea_revision = 0;
5421 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5422 
5423 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5424 	audio_info->product_id = edid_caps->product_id;
5425 
5426 	cea_revision = drm_connector->display_info.cea_rev;
5427 
5428 	strscpy(audio_info->display_name,
5429 		edid_caps->display_name,
5430 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5431 
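	/* Audio data blocks are defined from CEA-861 extension revision 3 onward. */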
5432 	if (cea_revision >= 3) {
5433 		audio_info->mode_count = edid_caps->audio_mode_count;
5434 
5435 		for (i = 0; i < audio_info->mode_count; ++i) {
5436 			audio_info->modes[i].format_code =
5437 					(enum audio_format_code)
5438 					(edid_caps->audio_modes[i].format_code);
5439 			audio_info->modes[i].channel_count =
5440 					edid_caps->audio_modes[i].channel_count;
5441 			audio_info->modes[i].sample_rates.all =
5442 					edid_caps->audio_modes[i].sample_rate;
5443 			audio_info->modes[i].sample_size =
5444 					edid_caps->audio_modes[i].sample_size;
5445 		}
5446 	}
5447 
5448 	audio_info->flags.all = edid_caps->speaker_flags;
5449 
5450 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5451 	if (drm_connector->latency_present[0]) {
5452 		audio_info->video_latency = drm_connector->video_latency[0];
5453 		audio_info->audio_latency = drm_connector->audio_latency[0];
5454 	}
5455 
	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}
5459 
5460 static void
5461 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5462 				      struct drm_display_mode *dst_mode)
5463 {
5464 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5465 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5466 	dst_mode->crtc_clock = src_mode->crtc_clock;
5467 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5468 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5469 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5470 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5471 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5472 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5473 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5474 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5475 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5476 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5477 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5478 }
5479 
5480 static void
5481 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5482 					const struct drm_display_mode *native_mode,
5483 					bool scale_enabled)
5484 {
5485 	if (scale_enabled) {
5486 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5487 	} else if (native_mode->clock == drm_mode->clock &&
5488 			native_mode->htotal == drm_mode->htotal &&
5489 			native_mode->vtotal == drm_mode->vtotal) {
5490 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5491 	} else {
		/* Neither scaling enabled nor an amdgpu-inserted mode; nothing to patch. */
5493 	}
5494 }
5495 
5496 static struct dc_sink *
5497 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5498 {
5499 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
5502 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5503 
5504 	sink = dc_sink_create(&sink_init_data);
5505 	if (!sink) {
5506 		DRM_ERROR("Failed to create sink!\n");
5507 		return NULL;
5508 	}
5509 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5510 
5511 	return sink;
5512 }
5513 
5514 static void set_multisync_trigger_params(
5515 		struct dc_stream_state *stream)
5516 {
5517 	struct dc_stream_state *master = NULL;
5518 
5519 	if (stream->triggered_crtc_reset.enabled) {
5520 		master = stream->triggered_crtc_reset.event_source;
5521 		stream->triggered_crtc_reset.event =
5522 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5523 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5524 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5525 	}
5526 }
5527 
5528 static void set_master_stream(struct dc_stream_state *stream_set[],
5529 			      int stream_count)
5530 {
5531 	int j, highest_rfr = 0, master_stream = 0;
5532 
	for (j = 0; j < stream_count; j++) {
5534 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5535 			int refresh_rate = 0;
5536 
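			/*
			 * Refresh rate in Hz: pix_clk_100hz is in 100 Hz
			 * units, hence the * 100, divided by the pixel count
			 * per frame (h_total * v_total).
			 */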
5537 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5538 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5539 			if (refresh_rate > highest_rfr) {
5540 				highest_rfr = refresh_rate;
5541 				master_stream = j;
5542 			}
5543 		}
5544 	}
	for (j = 0; j < stream_count; j++) {
5546 		if (stream_set[j])
5547 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5548 	}
5549 }
5550 
5551 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5552 {
5553 	int i = 0;
5554 	struct dc_stream_state *stream;
5555 
5556 	if (context->stream_count < 2)
5557 		return;
	for (i = 0; i < context->stream_count; i++) {
5559 		if (!context->streams[i])
5560 			continue;
5561 		/*
5562 		 * TODO: add a function to read AMD VSDB bits and set
5563 		 * crtc_sync_master.multi_sync_enabled flag
5564 		 * For now it's set to false
5565 		 */
5566 	}
5567 
5568 	set_master_stream(context->streams, context->stream_count);
5569 
	for (i = 0; i < context->stream_count; i++) {
5571 		stream = context->streams[i];
5572 
5573 		if (!stream)
5574 			continue;
5575 
5576 		set_multisync_trigger_params(stream);
5577 	}
5578 }
5579 
5580 #if defined(CONFIG_DRM_AMD_DC_DCN)
5581 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5582 							struct dc_sink *sink, struct dc_stream_state *stream,
5583 							struct dsc_dec_dpcd_caps *dsc_caps)
5584 {
5585 	stream->timing.flags.DSC = 0;
5586 
5587 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5588 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5589 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5590 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5591 				      dsc_caps);
5592 	}
5593 }
5594 
5595 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5596 										struct dc_sink *sink, struct dc_stream_state *stream,
5597 										struct dsc_dec_dpcd_caps *dsc_caps)
5598 {
5599 	struct drm_connector *drm_connector = &aconnector->base;
5600 	uint32_t link_bandwidth_kbps;
5601 
5602 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5603 							dc_link_get_link_cap(aconnector->dc_link));
5604 	/* Set DSC policy according to dsc_clock_en */
5605 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5606 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5607 
5608 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5609 
5610 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5611 						dsc_caps,
5612 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5613 						0,
5614 						link_bandwidth_kbps,
5615 						&stream->timing,
5616 						&stream->timing.dsc_cfg)) {
5617 			stream->timing.flags.DSC = 1;
5618 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5619 		}
5620 	}
5621 
5622 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5623 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5624 		stream->timing.flags.DSC = 1;
5625 
5626 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5627 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5628 
5629 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5630 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5631 
5632 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5633 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5634 }
5635 #endif
5636 
5637 /**
5638  * DOC: FreeSync Video
5639  *
5640  * When a userspace application wants to play a video, the content follows a
5641  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
5644  *
5645  * - TV/NTSC (23.976 FPS)
5646  * - Cinema (24 FPS)
5647  * - TV/PAL (25 FPS)
5648  * - TV/NTSC (29.97 FPS)
5649  * - TV/NTSC (30 FPS)
5650  * - Cinema HFR (48 FPS)
5651  * - TV/PAL (50 FPS)
5652  * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
5654  *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
5659  * reprogramming of the output in any other way. If the userspace requests a
5660  * modesetting change compatible with FreeSync modes that only differ in the
5661  * refresh rate, DC will skip the full update and avoid blink during the
 * transition. For example, a video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen, without
 * causing any display blink. The same concept applies to an explicit mode
 * setting change.
5666  */
5667 static struct drm_display_mode *
5668 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5669 			  bool use_probed_modes)
5670 {
5671 	struct drm_display_mode *m, *m_pref = NULL;
5672 	u16 current_refresh, highest_refresh;
5673 	struct list_head *list_head = use_probed_modes ?
5674 						    &aconnector->base.probed_modes :
5675 						    &aconnector->base.modes;
5676 
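	/*
	 * Return the cached base mode if one was already computed; it is
	 * stored in freesync_vid_base on first use (see the end of this
	 * function).
	 */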
5677 	if (aconnector->freesync_vid_base.clock != 0)
5678 		return &aconnector->freesync_vid_base;
5679 
5680 	/* Find the preferred mode */
5681 	list_for_each_entry (m, list_head, head) {
5682 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5683 			m_pref = m;
5684 			break;
5685 		}
5686 	}
5687 
5688 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5690 		m_pref = list_first_entry_or_null(
5691 			&aconnector->base.modes, struct drm_display_mode, head);
5692 		if (!m_pref) {
5693 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5694 			return NULL;
5695 		}
5696 	}
5697 
5698 	highest_refresh = drm_mode_vrefresh(m_pref);
5699 
5700 	/*
5701 	 * Find the mode with highest refresh rate with same resolution.
5702 	 * For some monitors, preferred mode is not the mode with highest
5703 	 * supported refresh rate.
5704 	 */
5705 	list_for_each_entry (m, list_head, head) {
5706 		current_refresh  = drm_mode_vrefresh(m);
5707 
5708 		if (m->hdisplay == m_pref->hdisplay &&
5709 		    m->vdisplay == m_pref->vdisplay &&
5710 		    highest_refresh < current_refresh) {
5711 			highest_refresh = current_refresh;
5712 			m_pref = m;
5713 		}
5714 	}
5715 
5716 	aconnector->freesync_vid_base = *m_pref;
5717 	return m_pref;
5718 }
5719 
5720 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5721 				   struct amdgpu_dm_connector *aconnector)
5722 {
5723 	struct drm_display_mode *high_mode;
5724 	int timing_diff;
5725 
5726 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5727 	if (!high_mode || !mode)
5728 		return false;
5729 
5730 	timing_diff = high_mode->vtotal - mode->vtotal;
5731 
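	/*
	 * Modes in the same freesync video group may differ only in the
	 * vertical front porch: every vertical sync position below must be
	 * offset by the same vtotal delta, and all horizontal timings must
	 * match exactly.
	 */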
5732 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5733 	    high_mode->hdisplay != mode->hdisplay ||
5734 	    high_mode->vdisplay != mode->vdisplay ||
5735 	    high_mode->hsync_start != mode->hsync_start ||
5736 	    high_mode->hsync_end != mode->hsync_end ||
5737 	    high_mode->htotal != mode->htotal ||
5738 	    high_mode->hskew != mode->hskew ||
5739 	    high_mode->vscan != mode->vscan ||
5740 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5741 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5742 		return false;
5743 	else
5744 		return true;
5745 }
5746 
5747 static struct dc_stream_state *
5748 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5749 		       const struct drm_display_mode *drm_mode,
5750 		       const struct dm_connector_state *dm_state,
5751 		       const struct dc_stream_state *old_stream,
5752 		       int requested_bpc)
5753 {
5754 	struct drm_display_mode *preferred_mode = NULL;
5755 	struct drm_connector *drm_connector;
5756 	const struct drm_connector_state *con_state =
5757 		dm_state ? &dm_state->base : NULL;
5758 	struct dc_stream_state *stream = NULL;
5759 	struct drm_display_mode mode = *drm_mode;
5760 	struct drm_display_mode saved_mode;
5761 	struct drm_display_mode *freesync_mode = NULL;
5762 	bool native_mode_found = false;
5763 	bool recalculate_timing = false;
5764 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5765 	int mode_refresh;
5766 	int preferred_refresh = 0;
5767 #if defined(CONFIG_DRM_AMD_DC_DCN)
5768 	struct dsc_dec_dpcd_caps dsc_caps;
5769 #endif
5770 	struct dc_sink *sink = NULL;
5771 
5772 	memset(&saved_mode, 0, sizeof(saved_mode));
5773 
5774 	if (aconnector == NULL) {
5775 		DRM_ERROR("aconnector is NULL!\n");
5776 		return stream;
5777 	}
5778 
5779 	drm_connector = &aconnector->base;
5780 
5781 	if (!aconnector->dc_sink) {
5782 		sink = create_fake_sink(aconnector);
5783 		if (!sink)
5784 			return stream;
5785 	} else {
5786 		sink = aconnector->dc_sink;
5787 		dc_sink_retain(sink);
5788 	}
5789 
5790 	stream = dc_create_stream_for_sink(sink);
5791 
5792 	if (stream == NULL) {
5793 		DRM_ERROR("Failed to create stream for sink!\n");
5794 		goto finish;
5795 	}
5796 
5797 	stream->dm_stream_context = aconnector;
5798 
5799 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5800 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5801 
5802 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5803 		/* Search for preferred mode */
5804 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5805 			native_mode_found = true;
5806 			break;
5807 		}
5808 	}
5809 	if (!native_mode_found)
5810 		preferred_mode = list_first_entry_or_null(
5811 				&aconnector->base.modes,
5812 				struct drm_display_mode,
5813 				head);
5814 
5815 	mode_refresh = drm_mode_vrefresh(&mode);
5816 
5817 	if (preferred_mode == NULL) {
5818 		/*
		 * This may not be an error; the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in yet.
5823 		 */
5824 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5825 	} else {
5826 		recalculate_timing = amdgpu_freesync_vid_mode &&
5827 				 is_freesync_video_mode(&mode, aconnector);
5828 		if (recalculate_timing) {
5829 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5830 			saved_mode = mode;
5831 			mode = *freesync_mode;
5832 		} else {
5833 			decide_crtc_timing_for_drm_display_mode(
5834 				&mode, preferred_mode, scale);
5835 
5836 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5837 		}
5838 	}
5839 
5840 	if (recalculate_timing)
5841 		drm_mode_set_crtcinfo(&saved_mode, 0);
5842 	else if (!dm_state)
5843 		drm_mode_set_crtcinfo(&mode, 0);
5844 
5845        /*
5846 	* If scaling is enabled and refresh rate didn't change
5847 	* we copy the vic and polarities of the old timings
5848 	*/
5849 	if (!scale || mode_refresh != preferred_refresh)
5850 		fill_stream_properties_from_drm_display_mode(
5851 			stream, &mode, &aconnector->base, con_state, NULL,
5852 			requested_bpc);
5853 	else
5854 		fill_stream_properties_from_drm_display_mode(
5855 			stream, &mode, &aconnector->base, con_state, old_stream,
5856 			requested_bpc);
5857 
5858 #if defined(CONFIG_DRM_AMD_DC_DCN)
5859 	/* SST DSC determination policy */
5860 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5861 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5862 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5863 #endif
5864 
5865 	update_stream_scaling_settings(&mode, dm_state, stream);
5866 
5867 	fill_audio_info(
5868 		&stream->audio_info,
5869 		drm_connector,
5870 		sink);
5871 
5872 	update_stream_signal(stream, sink);
5873 
5874 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5875 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5876 
5877 	if (stream->link->psr_settings.psr_feature_enabled) {
5878 		//
5879 		// should decide stream support vsc sdp colorimetry capability
5880 		// before building vsc info packet
5881 		//
5882 		stream->use_vsc_sdp_for_colorimetry = false;
5883 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5884 			stream->use_vsc_sdp_for_colorimetry =
5885 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5886 		} else {
5887 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5888 				stream->use_vsc_sdp_for_colorimetry = true;
5889 		}
5890 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
5894 finish:
5895 	dc_sink_release(sink);
5896 
5897 	return stream;
5898 }
5899 
5900 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5901 {
5902 	drm_crtc_cleanup(crtc);
5903 	kfree(crtc);
5904 }
5905 
5906 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5907 				  struct drm_crtc_state *state)
5908 {
5909 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5910 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5912 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5920 }
5921 
5922 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5923 {
5924 	struct dm_crtc_state *state;
5925 
5926 	if (crtc->state)
5927 		dm_crtc_destroy_state(crtc, crtc->state);
5928 
5929 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5930 	if (WARN_ON(!state))
5931 		return;
5932 
5933 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5934 }
5935 
5936 static struct drm_crtc_state *
5937 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5938 {
5939 	struct dm_crtc_state *state, *cur;
5940 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5945 
5946 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5947 	if (!state)
5948 		return NULL;
5949 
5950 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5951 
5952 	if (cur->stream) {
5953 		state->stream = cur->stream;
5954 		dc_stream_retain(state->stream);
5955 	}
5956 
5957 	state->active_planes = cur->active_planes;
5958 	state->vrr_infopacket = cur->vrr_infopacket;
5959 	state->abm_level = cur->abm_level;
5960 	state->vrr_supported = cur->vrr_supported;
5961 	state->freesync_config = cur->freesync_config;
5962 	state->cm_has_degamma = cur->cm_has_degamma;
5963 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
5965 
5966 	return &state->base;
5967 }
5968 
5969 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5970 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5971 {
5972 	crtc_debugfs_init(crtc);
5973 
5974 	return 0;
5975 }
5976 #endif
5977 
5978 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5979 {
5980 	enum dc_irq_source irq_source;
5981 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5982 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5983 	int rc;
5984 
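	/* The VUPDATE interrupt source is indexed by this CRTC's OTG instance. */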
5985 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5986 
5987 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5988 
5989 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5990 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5991 	return rc;
5992 }
5993 
5994 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5995 {
5996 	enum dc_irq_source irq_source;
5997 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5998 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5999 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6000 #if defined(CONFIG_DRM_AMD_DC_DCN)
6001 	struct amdgpu_display_manager *dm = &adev->dm;
6002 	struct vblank_control_work *work;
6003 #endif
6004 	int rc = 0;
6005 
6006 	if (enable) {
6007 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6008 		if (amdgpu_dm_vrr_active(acrtc_state))
6009 			rc = dm_set_vupdate_irq(crtc, true);
6010 	} else {
6011 		/* vblank irq off -> vupdate irq off */
6012 		rc = dm_set_vupdate_irq(crtc, false);
6013 	}
6014 
6015 	if (rc)
6016 		return rc;
6017 
6018 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6019 
6020 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6021 		return -EBUSY;
6022 
6023 	if (amdgpu_in_reset(adev))
6024 		return 0;
6025 
6026 #if defined(CONFIG_DRM_AMD_DC_DCN)
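	/* May be reached in atomic context (e.g. under the vblank spinlock),
	 * hence the GFP_ATOMIC allocation. */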
6027 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
6028 	if (!work)
6029 		return -ENOMEM;
6030 
6031 	INIT_WORK(&work->work, vblank_control_worker);
6032 	work->dm = dm;
6033 	work->acrtc = acrtc;
6034 	work->enable = enable;
6035 
6036 	if (acrtc_state->stream) {
6037 		dc_stream_retain(acrtc_state->stream);
6038 		work->stream = acrtc_state->stream;
6039 	}
6040 
6041 	queue_work(dm->vblank_control_workqueue, &work->work);
6042 #endif
6043 
6044 	return 0;
6045 }
6046 
6047 static int dm_enable_vblank(struct drm_crtc *crtc)
6048 {
6049 	return dm_set_vblank(crtc, true);
6050 }
6051 
6052 static void dm_disable_vblank(struct drm_crtc *crtc)
6053 {
6054 	dm_set_vblank(crtc, false);
6055 }
6056 
/* Only the options currently available for the driver are implemented */
6058 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6059 	.reset = dm_crtc_reset_state,
6060 	.destroy = amdgpu_dm_crtc_destroy,
6061 	.set_config = drm_atomic_helper_set_config,
6062 	.page_flip = drm_atomic_helper_page_flip,
6063 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6064 	.atomic_destroy_state = dm_crtc_destroy_state,
6065 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6066 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6067 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6068 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6069 	.enable_vblank = dm_enable_vblank,
6070 	.disable_vblank = dm_disable_vblank,
6071 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6072 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6073 	.late_register = amdgpu_dm_crtc_late_register,
6074 #endif
6075 };
6076 
6077 static enum drm_connector_status
6078 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6079 {
6080 	bool connected;
6081 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6082 
6083 	/*
6084 	 * Notes:
6085 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
6088 	 */
6089 
6090 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6091 	    !aconnector->fake_enable)
6092 		connected = (aconnector->dc_sink != NULL);
6093 	else
6094 		connected = (aconnector->base.force == DRM_FORCE_ON);
6095 
6096 	update_subconnector_property(aconnector);
6097 
6098 	return (connected ? connector_status_connected :
6099 			connector_status_disconnected);
6100 }
6101 
6102 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6103 					    struct drm_connector_state *connector_state,
6104 					    struct drm_property *property,
6105 					    uint64_t val)
6106 {
6107 	struct drm_device *dev = connector->dev;
6108 	struct amdgpu_device *adev = drm_to_adev(dev);
6109 	struct dm_connector_state *dm_old_state =
6110 		to_dm_connector_state(connector->state);
6111 	struct dm_connector_state *dm_new_state =
6112 		to_dm_connector_state(connector_state);
6113 
6114 	int ret = -EINVAL;
6115 
6116 	if (property == dev->mode_config.scaling_mode_property) {
6117 		enum amdgpu_rmx_type rmx_type;
6118 
6119 		switch (val) {
6120 		case DRM_MODE_SCALE_CENTER:
6121 			rmx_type = RMX_CENTER;
6122 			break;
6123 		case DRM_MODE_SCALE_ASPECT:
6124 			rmx_type = RMX_ASPECT;
6125 			break;
6126 		case DRM_MODE_SCALE_FULLSCREEN:
6127 			rmx_type = RMX_FULL;
6128 			break;
6129 		case DRM_MODE_SCALE_NONE:
6130 		default:
6131 			rmx_type = RMX_OFF;
6132 			break;
6133 		}
6134 
6135 		if (dm_old_state->scaling == rmx_type)
6136 			return 0;
6137 
6138 		dm_new_state->scaling = rmx_type;
6139 		ret = 0;
6140 	} else if (property == adev->mode_info.underscan_hborder_property) {
6141 		dm_new_state->underscan_hborder = val;
6142 		ret = 0;
6143 	} else if (property == adev->mode_info.underscan_vborder_property) {
6144 		dm_new_state->underscan_vborder = val;
6145 		ret = 0;
6146 	} else if (property == adev->mode_info.underscan_property) {
6147 		dm_new_state->underscan_enable = val;
6148 		ret = 0;
6149 	} else if (property == adev->mode_info.abm_level_property) {
6150 		dm_new_state->abm_level = val;
6151 		ret = 0;
6152 	}
6153 
6154 	return ret;
6155 }
6156 
6157 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6158 					    const struct drm_connector_state *state,
6159 					    struct drm_property *property,
6160 					    uint64_t *val)
6161 {
6162 	struct drm_device *dev = connector->dev;
6163 	struct amdgpu_device *adev = drm_to_adev(dev);
6164 	struct dm_connector_state *dm_state =
6165 		to_dm_connector_state(state);
6166 	int ret = -EINVAL;
6167 
6168 	if (property == dev->mode_config.scaling_mode_property) {
6169 		switch (dm_state->scaling) {
6170 		case RMX_CENTER:
6171 			*val = DRM_MODE_SCALE_CENTER;
6172 			break;
6173 		case RMX_ASPECT:
6174 			*val = DRM_MODE_SCALE_ASPECT;
6175 			break;
6176 		case RMX_FULL:
6177 			*val = DRM_MODE_SCALE_FULLSCREEN;
6178 			break;
6179 		case RMX_OFF:
6180 		default:
6181 			*val = DRM_MODE_SCALE_NONE;
6182 			break;
6183 		}
6184 		ret = 0;
6185 	} else if (property == adev->mode_info.underscan_hborder_property) {
6186 		*val = dm_state->underscan_hborder;
6187 		ret = 0;
6188 	} else if (property == adev->mode_info.underscan_vborder_property) {
6189 		*val = dm_state->underscan_vborder;
6190 		ret = 0;
6191 	} else if (property == adev->mode_info.underscan_property) {
6192 		*val = dm_state->underscan_enable;
6193 		ret = 0;
6194 	} else if (property == adev->mode_info.abm_level_property) {
6195 		*val = dm_state->abm_level;
6196 		ret = 0;
6197 	}
6198 
6199 	return ret;
6200 }
6201 
6202 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6203 {
6204 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6205 
6206 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6207 }
6208 
6209 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6210 {
6211 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6212 	const struct dc_link *link = aconnector->dc_link;
6213 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6214 	struct amdgpu_display_manager *dm = &adev->dm;
6215 	int i;
6216 
6217 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
6220 	 */
6221 	if (aconnector->mst_mgr.dev)
6222 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6223 
6224 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6225 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6226 	for (i = 0; i < dm->num_of_edps; i++) {
6227 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6228 			backlight_device_unregister(dm->backlight_dev[i]);
6229 			dm->backlight_dev[i] = NULL;
6230 		}
6231 	}
6232 #endif
6233 
6234 	if (aconnector->dc_em_sink)
6235 		dc_sink_release(aconnector->dc_em_sink);
6236 	aconnector->dc_em_sink = NULL;
6237 	if (aconnector->dc_sink)
6238 		dc_sink_release(aconnector->dc_sink);
6239 	aconnector->dc_sink = NULL;
6240 
6241 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6242 	drm_connector_unregister(connector);
6243 	drm_connector_cleanup(connector);
6244 	if (aconnector->i2c) {
6245 		i2c_del_adapter(&aconnector->i2c->base);
6246 		kfree(aconnector->i2c);
6247 	}
6248 	kfree(aconnector->dm_dp_aux.aux.name);
6249 
6250 	kfree(connector);
6251 }
6252 
6253 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6254 {
6255 	struct dm_connector_state *state =
6256 		to_dm_connector_state(connector->state);
6257 
6258 	if (connector->state)
6259 		__drm_atomic_helper_connector_destroy_state(connector->state);
6260 
6261 	kfree(state);
6262 
6263 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6264 
6265 	if (state) {
6266 		state->scaling = RMX_OFF;
6267 		state->underscan_enable = false;
6268 		state->underscan_hborder = 0;
6269 		state->underscan_vborder = 0;
6270 		state->base.max_requested_bpc = 8;
6271 		state->vcpi_slots = 0;
6272 		state->pbn = 0;
6273 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6274 			state->abm_level = amdgpu_dm_abm_level;
6275 
6276 		__drm_atomic_helper_connector_reset(connector, &state->base);
6277 	}
6278 }
6279 
6280 struct drm_connector_state *
6281 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6282 {
6283 	struct dm_connector_state *state =
6284 		to_dm_connector_state(connector->state);
6285 
6286 	struct dm_connector_state *new_state =
6287 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6288 
6289 	if (!new_state)
6290 		return NULL;
6291 
6292 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6293 
6294 	new_state->freesync_capable = state->freesync_capable;
6295 	new_state->abm_level = state->abm_level;
6296 	new_state->scaling = state->scaling;
6297 	new_state->underscan_enable = state->underscan_enable;
6298 	new_state->underscan_hborder = state->underscan_hborder;
6299 	new_state->underscan_vborder = state->underscan_vborder;
6300 	new_state->vcpi_slots = state->vcpi_slots;
6301 	new_state->pbn = state->pbn;
6302 	return &new_state->base;
6303 }
6304 
6305 static int
6306 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6307 {
6308 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6309 		to_amdgpu_dm_connector(connector);
6310 	int r;
6311 
6312 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6313 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6314 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6315 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6316 		if (r)
6317 			return r;
6318 	}
6319 
6320 #if defined(CONFIG_DEBUG_FS)
6321 	connector_debugfs_init(amdgpu_dm_connector);
6322 #endif
6323 
6324 	return 0;
6325 }
6326 
6327 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6328 	.reset = amdgpu_dm_connector_funcs_reset,
6329 	.detect = amdgpu_dm_connector_detect,
6330 	.fill_modes = drm_helper_probe_single_connector_modes,
6331 	.destroy = amdgpu_dm_connector_destroy,
6332 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6333 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6334 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6335 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6336 	.late_register = amdgpu_dm_connector_late_register,
6337 	.early_unregister = amdgpu_dm_connector_unregister
6338 };
6339 
6340 static int get_modes(struct drm_connector *connector)
6341 {
6342 	return amdgpu_dm_connector_get_modes(connector);
6343 }
6344 
6345 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6346 {
6347 	struct dc_sink_init_data init_params = {
6348 			.link = aconnector->dc_link,
6349 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6350 	};
6351 	struct edid *edid;
6352 
6353 	if (!aconnector->base.edid_blob_ptr) {
6354 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6355 				aconnector->base.name);
6356 
6357 		aconnector->base.force = DRM_FORCE_OFF;
6358 		aconnector->base.override_edid = false;
6359 		return;
6360 	}
6361 
6362 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6363 
6364 	aconnector->edid = edid;
6365 
6366 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6367 		aconnector->dc_link,
6368 		(uint8_t *)edid,
6369 		(edid->extensions + 1) * EDID_LENGTH,
6370 		&init_params);
6371 
6372 	if (aconnector->base.force == DRM_FORCE_ON) {
6373 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6374 		aconnector->dc_link->local_sink :
6375 		aconnector->dc_em_sink;
6376 		dc_sink_retain(aconnector->dc_sink);
6377 	}
6378 }
6379 
6380 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6381 {
6382 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6383 
6384 	/*
	 * In case of a headless boot with force-on for a DP-managed connector,
	 * these settings must be != 0 to get an initial modeset.
6387 	 */
6388 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6389 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6390 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6391 	}
6392 
6393 
6394 	aconnector->base.override_edid = true;
6395 	create_eml_sink(aconnector);
6396 }
6397 
6398 static struct dc_stream_state *
6399 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6400 				const struct drm_display_mode *drm_mode,
6401 				const struct dm_connector_state *dm_state,
6402 				const struct dc_stream_state *old_stream)
6403 {
6404 	struct drm_connector *connector = &aconnector->base;
6405 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6406 	struct dc_stream_state *stream;
6407 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6408 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6409 	enum dc_status dc_result = DC_OK;
6410 
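	/*
	 * Try validation at the requested bpc first; on failure, step down
	 * by 2 (to a minimum of 6) until DC accepts the stream, since
	 * bandwidth-limited modes often validate at a reduced colour depth.
	 */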
6411 	do {
6412 		stream = create_stream_for_sink(aconnector, drm_mode,
6413 						dm_state, old_stream,
6414 						requested_bpc);
6415 		if (stream == NULL) {
6416 			DRM_ERROR("Failed to create stream for sink!\n");
6417 			break;
6418 		}
6419 
6420 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6421 
6422 		if (dc_result != DC_OK) {
6423 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6424 				      drm_mode->hdisplay,
6425 				      drm_mode->vdisplay,
6426 				      drm_mode->clock,
6427 				      dc_result,
6428 				      dc_status_to_str(dc_result));
6429 
6430 			dc_stream_release(stream);
6431 			stream = NULL;
6432 			requested_bpc -= 2; /* lower bpc to retry validation */
6433 		}
6434 
6435 	} while (stream == NULL && requested_bpc >= 6);
6436 
6437 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6438 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6439 
6440 		aconnector->force_yuv420_output = true;
6441 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6442 						dm_state, old_stream);
6443 		aconnector->force_yuv420_output = false;
6444 	}
6445 
6446 	return stream;
6447 }
6448 
6449 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6450 				   struct drm_display_mode *mode)
6451 {
6452 	int result = MODE_ERROR;
6453 	struct dc_sink *dc_sink;
6454 	/* TODO: Unhardcode stream count */
6455 	struct dc_stream_state *stream;
6456 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6457 
6458 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6459 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6460 		return result;
6461 
6462 	/*
	 * Only run this the first time mode_valid is called to initialize
6464 	 * EDID mgmt
6465 	 */
6466 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6467 		!aconnector->dc_em_sink)
6468 		handle_edid_mgmt(aconnector);
6469 
6470 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6471 
6472 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6473 				aconnector->base.force != DRM_FORCE_ON) {
6474 		DRM_ERROR("dc_sink is NULL!\n");
6475 		goto fail;
6476 	}
6477 
6478 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6479 	if (stream) {
6480 		dc_stream_release(stream);
6481 		result = MODE_OK;
6482 	}
6483 
6484 fail:
	/* TODO: error handling */
6486 	return result;
6487 }
6488 
6489 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6490 				struct dc_info_packet *out)
6491 {
6492 	struct hdmi_drm_infoframe frame;
6493 	unsigned char buf[30]; /* 26 + 4 */
6494 	ssize_t len;
6495 	int ret, i;
6496 
6497 	memset(out, 0, sizeof(*out));
6498 
6499 	if (!state->hdr_output_metadata)
6500 		return 0;
6501 
6502 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6503 	if (ret)
6504 		return ret;
6505 
6506 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6507 	if (len < 0)
6508 		return (int)len;
6509 
6510 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6511 	if (len != 30)
6512 		return -EINVAL;
6513 
6514 	/* Prepare the infopacket for DC. */
6515 	switch (state->connector->connector_type) {
6516 	case DRM_MODE_CONNECTOR_HDMIA:
6517 		out->hb0 = 0x87; /* type */
6518 		out->hb1 = 0x01; /* version */
6519 		out->hb2 = 0x1A; /* length */
6520 		out->sb[0] = buf[3]; /* checksum */
6521 		i = 1;
6522 		break;
6523 
6524 	case DRM_MODE_CONNECTOR_DisplayPort:
6525 	case DRM_MODE_CONNECTOR_eDP:
6526 		out->hb0 = 0x00; /* sdp id, zero */
6527 		out->hb1 = 0x87; /* type */
6528 		out->hb2 = 0x1D; /* payload len - 1 */
6529 		out->hb3 = (0x13 << 2); /* sdp version */
6530 		out->sb[0] = 0x01; /* version */
6531 		out->sb[1] = 0x1A; /* length */
6532 		i = 2;
6533 		break;
6534 
6535 	default:
6536 		return -EINVAL;
6537 	}
6538 
6539 	memcpy(&out->sb[i], &buf[4], 26);
6540 	out->valid = true;
6541 
6542 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6543 		       sizeof(out->sb), false);
6544 
6545 	return 0;
6546 }
6547 
6548 static int
6549 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6550 				 struct drm_atomic_state *state)
6551 {
6552 	struct drm_connector_state *new_con_state =
6553 		drm_atomic_get_new_connector_state(state, conn);
6554 	struct drm_connector_state *old_con_state =
6555 		drm_atomic_get_old_connector_state(state, conn);
6556 	struct drm_crtc *crtc = new_con_state->crtc;
6557 	struct drm_crtc_state *new_crtc_state;
6558 	int ret;
6559 
6560 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6561 
6562 	if (!crtc)
6563 		return 0;
6564 
6565 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6566 		struct dc_info_packet hdr_infopacket;
6567 
6568 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6569 		if (ret)
6570 			return ret;
6571 
6572 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6573 		if (IS_ERR(new_crtc_state))
6574 			return PTR_ERR(new_crtc_state);
6575 
6576 		/*
6577 		 * DC considers the stream backends changed if the
6578 		 * static metadata changes. Forcing the modeset also
6579 		 * gives a simple way for userspace to switch from
6580 		 * 8bpc to 10bpc when setting the metadata to enter
6581 		 * or exit HDR.
6582 		 *
6583 		 * Changing the static metadata after it's been
6584 		 * set is permissible, however. So only force a
6585 		 * modeset if we're entering or exiting HDR.
6586 		 */
6587 		new_crtc_state->mode_changed =
6588 			!old_con_state->hdr_output_metadata ||
6589 			!new_con_state->hdr_output_metadata;
6590 	}
6591 
6592 	return 0;
6593 }
6594 
6595 static const struct drm_connector_helper_funcs
6596 amdgpu_dm_connector_helper_funcs = {
6597 	/*
	 * If hotplugging a second, bigger display in FB console mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes are missing after the user starts lightdm. So we need to
	 * renew the modes list in the get_modes callback, not just return the
	 * modes count.
6602 	 */
6603 	.get_modes = get_modes,
6604 	.mode_valid = amdgpu_dm_connector_mode_valid,
6605 	.atomic_check = amdgpu_dm_connector_atomic_check,
6606 };
6607 
6608 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6609 {
6610 }
6611 
6612 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6613 {
6614 	struct drm_atomic_state *state = new_crtc_state->state;
6615 	struct drm_plane *plane;
6616 	int num_active = 0;
6617 
6618 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6619 		struct drm_plane_state *new_plane_state;
6620 
6621 		/* Cursor planes are "fake". */
6622 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6623 			continue;
6624 
6625 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6626 
6627 		if (!new_plane_state) {
6628 			/*
			 * The plane is enabled on the CRTC and hasn't changed
6630 			 * state. This means that it previously passed
6631 			 * validation and is therefore enabled.
6632 			 */
6633 			num_active += 1;
6634 			continue;
6635 		}
6636 
6637 		/* We need a framebuffer to be considered enabled. */
6638 		num_active += (new_plane_state->fb != NULL);
6639 	}
6640 
6641 	return num_active;
6642 }
6643 
6644 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6645 					 struct drm_crtc_state *new_crtc_state)
6646 {
6647 	struct dm_crtc_state *dm_new_crtc_state =
6648 		to_dm_crtc_state(new_crtc_state);
6649 
6650 	dm_new_crtc_state->active_planes = 0;
6651 
6652 	if (!dm_new_crtc_state->stream)
6653 		return;
6654 
6655 	dm_new_crtc_state->active_planes =
6656 		count_crtc_active_planes(new_crtc_state);
6657 }
6658 
6659 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6660 				       struct drm_atomic_state *state)
6661 {
6662 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6663 									  crtc);
6664 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6665 	struct dc *dc = adev->dm.dc;
6666 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6667 	int ret = -EINVAL;
6668 
6669 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6670 
6671 	dm_update_crtc_active_planes(crtc, crtc_state);
6672 
6673 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6674 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6675 		return ret;
6676 	}
6677 
6678 	/*
6679 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6680 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6681 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6682 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6683 	 */
6684 	if (crtc_state->enable &&
6685 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6686 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6687 		return -EINVAL;
6688 	}
6689 
6690 	/* In some use cases, like reset, no stream is attached */
6691 	if (!dm_crtc_state->stream)
6692 		return 0;
6693 
6694 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6695 		return 0;
6696 
6697 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6698 	return ret;
6699 }
6700 
6701 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6702 				      const struct drm_display_mode *mode,
6703 				      struct drm_display_mode *adjusted_mode)
6704 {
6705 	return true;
6706 }
6707 
6708 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6709 	.disable = dm_crtc_helper_disable,
6710 	.atomic_check = dm_crtc_helper_atomic_check,
6711 	.mode_fixup = dm_crtc_helper_mode_fixup,
6712 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6713 };
6714 
6715 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6716 {
6717 
6718 }
6719 
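/*
 * Translate a DC color depth into bits per component; unknown depths
 * map to 0.
 */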
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
6740 
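/*
 * For DP MST connectors: derive the bpp from the negotiated color depth,
 * compute the PBN for the adjusted mode and atomically reserve VCPI slots
 * on the MST topology. Non-MST connectors, and states with no relevant
 * change, return 0 immediately.
 */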
6741 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6742 					  struct drm_crtc_state *crtc_state,
6743 					  struct drm_connector_state *conn_state)
6744 {
6745 	struct drm_atomic_state *state = crtc_state->state;
6746 	struct drm_connector *connector = conn_state->connector;
6747 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6748 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6749 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6750 	struct drm_dp_mst_topology_mgr *mst_mgr;
6751 	struct drm_dp_mst_port *mst_port;
6752 	enum dc_color_depth color_depth;
6753 	int clock, bpp = 0;
6754 	bool is_y420 = false;
6755 
6756 	if (!aconnector->port || !aconnector->dc_sink)
6757 		return 0;
6758 
6759 	mst_port = aconnector->port;
6760 	mst_mgr = &aconnector->mst_port->mst_mgr;
6761 
6762 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6763 		return 0;
6764 
6765 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
6769 		color_depth = convert_color_depth_from_display_info(connector,
6770 								    is_y420,
6771 								    max_bpc);
6772 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6773 		clock = adjusted_mode->clock;
6774 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6775 	}
6776 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6777 									   mst_mgr,
6778 									   mst_port,
6779 									   dm_new_connector_state->pbn,
6780 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6781 	if (dm_new_connector_state->vcpi_slots < 0) {
6782 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6783 		return dm_new_connector_state->vcpi_slots;
6784 	}
6785 	return 0;
6786 }
6787 
6788 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6789 	.disable = dm_encoder_helper_disable,
6790 	.atomic_check = dm_encoder_helper_atomic_check
6791 };
6792 
6793 #if defined(CONFIG_DRM_AMD_DC_DCN)
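/*
 * Walk the MST connectors in the atomic state and, for each stream with
 * DSC enabled, recompute the PBN from the DSC bits-per-pixel and re-run
 * the atomic DSC enablement so the stored PBN/VCPI match the compressed
 * stream; DSC is disabled on the port otherwise.
 */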
6794 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6795 					    struct dc_state *dc_state)
6796 {
6797 	struct dc_stream_state *stream = NULL;
6798 	struct drm_connector *connector;
6799 	struct drm_connector_state *new_con_state;
6800 	struct amdgpu_dm_connector *aconnector;
6801 	struct dm_connector_state *dm_conn_state;
6802 	int i, j, clock, bpp;
6803 	int vcpi, pbn_div, pbn = 0;
6804 
6805 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6806 
6807 		aconnector = to_amdgpu_dm_connector(connector);
6808 
6809 		if (!aconnector->port)
6810 			continue;
6811 
6812 		if (!new_con_state || !new_con_state->crtc)
6813 			continue;
6814 
6815 		dm_conn_state = to_dm_connector_state(new_con_state);
6816 
6817 		for (j = 0; j < dc_state->stream_count; j++) {
6818 			stream = dc_state->streams[j];
6819 			if (!stream)
6820 				continue;
6821 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6823 				break;
6824 
6825 			stream = NULL;
6826 		}
6827 
6828 		if (!stream)
6829 			continue;
6830 
6831 		if (stream->timing.flags.DSC != 1) {
6832 			drm_dp_mst_atomic_enable_dsc(state,
6833 						     aconnector->port,
6834 						     dm_conn_state->pbn,
6835 						     0,
6836 						     false);
6837 			continue;
6838 		}
6839 
6840 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6841 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6842 		clock = stream->timing.pix_clk_100hz / 10;
6843 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6844 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6845 						    aconnector->port,
6846 						    pbn, pbn_div,
6847 						    true);
6848 		if (vcpi < 0)
6849 			return vcpi;
6850 
6851 		dm_conn_state->pbn = pbn;
6852 		dm_conn_state->vcpi_slots = vcpi;
6853 	}
6854 	return 0;
6855 }
6856 #endif
6857 
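/*
 * Release any existing plane state and install a freshly zeroed DM plane
 * state via the atomic helper.
 */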
6858 static void dm_drm_plane_reset(struct drm_plane *plane)
6859 {
6860 	struct dm_plane_state *amdgpu_state = NULL;
6861 
6862 	if (plane->state)
6863 		plane->funcs->atomic_destroy_state(plane, plane->state);
6864 
6865 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6866 	WARN_ON(amdgpu_state == NULL);
6867 
6868 	if (amdgpu_state)
6869 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6870 }
6871 
6872 static struct drm_plane_state *
6873 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6874 {
6875 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6876 
6877 	old_dm_plane_state = to_dm_plane_state(plane->state);
6878 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6879 	if (!dm_plane_state)
6880 		return NULL;
6881 
6882 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6883 
6884 	if (old_dm_plane_state->dc_state) {
6885 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6886 		dc_plane_state_retain(dm_plane_state->dc_state);
6887 	}
6888 
6889 	return &dm_plane_state->base;
6890 }
6891 
6892 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6893 				struct drm_plane_state *state)
6894 {
6895 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6896 
6897 	if (dm_plane_state->dc_state)
6898 		dc_plane_state_release(dm_plane_state->dc_state);
6899 
6900 	drm_atomic_helper_plane_destroy_state(plane, state);
6901 }
6902 
6903 static const struct drm_plane_funcs dm_plane_funcs = {
6904 	.update_plane	= drm_atomic_helper_update_plane,
6905 	.disable_plane	= drm_atomic_helper_disable_plane,
6906 	.destroy	= drm_primary_helper_destroy,
6907 	.reset = dm_drm_plane_reset,
6908 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6909 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6910 	.format_mod_supported = dm_plane_format_mod_supported,
6911 };
6912 
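/*
 * Pin the framebuffer BO into a displayable domain (cursors must live in
 * VRAM), bind it into GART, record the resulting GPU address in the
 * amdgpu_framebuffer, and fill the DC buffer attributes for newly created
 * plane states.
 */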
6913 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6914 				      struct drm_plane_state *new_state)
6915 {
6916 	struct amdgpu_framebuffer *afb;
6917 	struct drm_gem_object *obj;
6918 	struct amdgpu_device *adev;
6919 	struct amdgpu_bo *rbo;
6920 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6921 	struct list_head list;
6922 	struct ttm_validate_buffer tv;
6923 	struct ww_acquire_ctx ticket;
6924 	uint32_t domain;
6925 	int r;
6926 
6927 	if (!new_state->fb) {
6928 		DRM_DEBUG_KMS("No FB bound\n");
6929 		return 0;
6930 	}
6931 
6932 	afb = to_amdgpu_framebuffer(new_state->fb);
6933 	obj = new_state->fb->obj[0];
6934 	rbo = gem_to_amdgpu_bo(obj);
6935 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6936 	INIT_LIST_HEAD(&list);
6937 
6938 	tv.bo = &rbo->tbo;
6939 	tv.num_shared = 1;
6940 	list_add(&tv.head, &list);
6941 
6942 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6943 	if (r) {
6944 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6945 		return r;
6946 	}
6947 
6948 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6949 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6950 	else
6951 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6952 
6953 	r = amdgpu_bo_pin(rbo, domain);
6954 	if (unlikely(r != 0)) {
6955 		if (r != -ERESTARTSYS)
6956 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6957 		ttm_eu_backoff_reservation(&ticket, &list);
6958 		return r;
6959 	}
6960 
6961 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6962 	if (unlikely(r != 0)) {
6963 		amdgpu_bo_unpin(rbo);
6964 		ttm_eu_backoff_reservation(&ticket, &list);
6965 		DRM_ERROR("%p bind failed\n", rbo);
6966 		return r;
6967 	}
6968 
6969 	ttm_eu_backoff_reservation(&ticket, &list);
6970 
6971 	afb->address = amdgpu_bo_gpu_offset(rbo);
6972 
6973 	amdgpu_bo_ref(rbo);
6974 
	/*
6976 	 * We don't do surface updates on planes that have been newly created,
6977 	 * but we also don't have the afb->address during atomic check.
6978 	 *
6979 	 * Fill in buffer attributes depending on the address here, but only on
6980 	 * newly created planes since they're not being used by DC yet and this
6981 	 * won't modify global state.
6982 	 */
6983 	dm_plane_state_old = to_dm_plane_state(plane->state);
6984 	dm_plane_state_new = to_dm_plane_state(new_state);
6985 
6986 	if (dm_plane_state_new->dc_state &&
6987 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6988 		struct dc_plane_state *plane_state =
6989 			dm_plane_state_new->dc_state;
6990 		bool force_disable_dcc = !plane_state->dcc.enable;
6991 
6992 		fill_plane_buffer_attributes(
6993 			adev, afb, plane_state->format, plane_state->rotation,
6994 			afb->tiling_flags,
6995 			&plane_state->tiling_info, &plane_state->plane_size,
6996 			&plane_state->dcc, &plane_state->address,
6997 			afb->tmz_surface, force_disable_dcc);
6998 	}
6999 
7000 	return 0;
7001 }
7002 
7003 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7004 				       struct drm_plane_state *old_state)
7005 {
7006 	struct amdgpu_bo *rbo;
7007 	int r;
7008 
7009 	if (!old_state->fb)
7010 		return;
7011 
7012 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7013 	r = amdgpu_bo_reserve(rbo, false);
7014 	if (unlikely(r)) {
7015 		DRM_ERROR("failed to reserve rbo before unpin\n");
7016 		return;
7017 	}
7018 
7019 	amdgpu_bo_unpin(rbo);
7020 	amdgpu_bo_unreserve(rbo);
7021 	amdgpu_bo_unref(&rbo);
7022 }
7023 
7024 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7025 				       struct drm_crtc_state *new_crtc_state)
7026 {
7027 	struct drm_framebuffer *fb = state->fb;
7028 	int min_downscale, max_upscale;
7029 	int min_scale = 0;
7030 	int max_scale = INT_MAX;
7031 
7032 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7033 	if (fb && state->crtc) {
7034 		/* Validate viewport to cover the case when only the position changes */
7035 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7036 			int viewport_width = state->crtc_w;
7037 			int viewport_height = state->crtc_h;
7038 
7039 			if (state->crtc_x < 0)
7040 				viewport_width += state->crtc_x;
7041 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7042 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7043 
7044 			if (state->crtc_y < 0)
7045 				viewport_height += state->crtc_y;
7046 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7047 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7048 
7049 			if (viewport_width < 0 || viewport_height < 0) {
7050 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7051 				return -EINVAL;
7052 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7053 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7054 				return -EINVAL;
7055 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7056 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7057 				return -EINVAL;
7058 			}
7059 
7060 		}
7061 
7062 		/* Get min/max allowed scaling factors from plane caps. */
7063 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7064 					     &min_downscale, &max_upscale);
7065 		/*
7066 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7067 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7068 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
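		 *
		 * For example, a DC max_upscale of 16000 (16x) yields
		 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16
		 * in 16.16 fixed point.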
7069 		 */
7070 		min_scale = (1000 << 16) / max_upscale;
7071 		max_scale = (1000 << 16) / min_downscale;
7072 	}
7073 
7074 	return drm_atomic_helper_check_plane_state(
7075 		state, new_crtc_state, min_scale, max_scale, true, true);
7076 }
7077 
7078 static int dm_plane_atomic_check(struct drm_plane *plane,
7079 				 struct drm_atomic_state *state)
7080 {
7081 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7082 										 plane);
7083 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7084 	struct dc *dc = adev->dm.dc;
7085 	struct dm_plane_state *dm_plane_state;
7086 	struct dc_scaling_info scaling_info;
7087 	struct drm_crtc_state *new_crtc_state;
7088 	int ret;
7089 
7090 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7091 
7092 	dm_plane_state = to_dm_plane_state(new_plane_state);
7093 
7094 	if (!dm_plane_state->dc_state)
7095 		return 0;
7096 
7097 	new_crtc_state =
7098 		drm_atomic_get_new_crtc_state(state,
7099 					      new_plane_state->crtc);
7100 	if (!new_crtc_state)
7101 		return -EINVAL;
7102 
7103 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7104 	if (ret)
7105 		return ret;
7106 
7107 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7108 	if (ret)
7109 		return ret;
7110 
7111 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7112 		return 0;
7113 
7114 	return -EINVAL;
7115 }
7116 
7117 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7118 				       struct drm_atomic_state *state)
7119 {
7120 	/* Only support async updates on cursor planes. */
7121 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7122 		return -EINVAL;
7123 
7124 	return 0;
7125 }
7126 
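/*
 * Async (non-blocking) cursor update: copy the new source/destination
 * rectangle and FB into the committed plane state, then program the
 * hardware cursor directly instead of going through a full commit.
 */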
7127 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7128 					 struct drm_atomic_state *state)
7129 {
7130 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7131 									   plane);
7132 	struct drm_plane_state *old_state =
7133 		drm_atomic_get_old_plane_state(state, plane);
7134 
7135 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7136 
7137 	swap(plane->state->fb, new_state->fb);
7138 
7139 	plane->state->src_x = new_state->src_x;
7140 	plane->state->src_y = new_state->src_y;
7141 	plane->state->src_w = new_state->src_w;
7142 	plane->state->src_h = new_state->src_h;
7143 	plane->state->crtc_x = new_state->crtc_x;
7144 	plane->state->crtc_y = new_state->crtc_y;
7145 	plane->state->crtc_w = new_state->crtc_w;
7146 	plane->state->crtc_h = new_state->crtc_h;
7147 
7148 	handle_cursor_update(plane, old_state);
7149 }
7150 
7151 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7152 	.prepare_fb = dm_plane_helper_prepare_fb,
7153 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7154 	.atomic_check = dm_plane_atomic_check,
7155 	.atomic_async_check = dm_plane_atomic_async_check,
7156 	.atomic_async_update = dm_plane_atomic_async_update
7157 };
7158 
7159 /*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check.
7164  */
7165 static const uint32_t rgb_formats[] = {
7166 	DRM_FORMAT_XRGB8888,
7167 	DRM_FORMAT_ARGB8888,
7168 	DRM_FORMAT_RGBA8888,
7169 	DRM_FORMAT_XRGB2101010,
7170 	DRM_FORMAT_XBGR2101010,
7171 	DRM_FORMAT_ARGB2101010,
7172 	DRM_FORMAT_ABGR2101010,
7173 	DRM_FORMAT_XRGB16161616,
7174 	DRM_FORMAT_XBGR16161616,
7175 	DRM_FORMAT_ARGB16161616,
7176 	DRM_FORMAT_ABGR16161616,
7177 	DRM_FORMAT_XBGR8888,
7178 	DRM_FORMAT_ABGR8888,
7179 	DRM_FORMAT_RGB565,
7180 };
7181 
7182 static const uint32_t overlay_formats[] = {
7183 	DRM_FORMAT_XRGB8888,
7184 	DRM_FORMAT_ARGB8888,
7185 	DRM_FORMAT_RGBA8888,
7186 	DRM_FORMAT_XBGR8888,
7187 	DRM_FORMAT_ABGR8888,
7188 	DRM_FORMAT_RGB565
7189 };
7190 
7191 static const u32 cursor_formats[] = {
7192 	DRM_FORMAT_ARGB8888
7193 };
7194 
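/*
 * Fill formats[] with the fourcc codes supported by the plane type;
 * primary planes additionally get NV12/P010/FP16 variants when the DC
 * plane caps allow them. Returns the number of entries written.
 */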
7195 static int get_plane_formats(const struct drm_plane *plane,
7196 			     const struct dc_plane_cap *plane_cap,
7197 			     uint32_t *formats, int max_formats)
7198 {
7199 	int i, num_formats = 0;
7200 
7201 	/*
7202 	 * TODO: Query support for each group of formats directly from
7203 	 * DC plane caps. This will require adding more formats to the
7204 	 * caps list.
7205 	 */
7206 
7207 	switch (plane->type) {
7208 	case DRM_PLANE_TYPE_PRIMARY:
7209 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7210 			if (num_formats >= max_formats)
7211 				break;
7212 
7213 			formats[num_formats++] = rgb_formats[i];
7214 		}
7215 
7216 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7217 			formats[num_formats++] = DRM_FORMAT_NV12;
7218 		if (plane_cap && plane_cap->pixel_format_support.p010)
7219 			formats[num_formats++] = DRM_FORMAT_P010;
7220 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7221 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7222 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7223 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7224 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7225 		}
7226 		break;
7227 
7228 	case DRM_PLANE_TYPE_OVERLAY:
7229 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7230 			if (num_formats >= max_formats)
7231 				break;
7232 
7233 			formats[num_formats++] = overlay_formats[i];
7234 		}
7235 		break;
7236 
7237 	case DRM_PLANE_TYPE_CURSOR:
7238 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7239 			if (num_formats >= max_formats)
7240 				break;
7241 
7242 			formats[num_formats++] = cursor_formats[i];
7243 		}
7244 		break;
7245 	}
7246 
7247 	return num_formats;
7248 }
7249 
7250 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7251 				struct drm_plane *plane,
7252 				unsigned long possible_crtcs,
7253 				const struct dc_plane_cap *plane_cap)
7254 {
7255 	uint32_t formats[32];
7256 	int num_formats;
7257 	int res = -EPERM;
7258 	unsigned int supported_rotations;
7259 	uint64_t *modifiers = NULL;
7260 
7261 	num_formats = get_plane_formats(plane, plane_cap, formats,
7262 					ARRAY_SIZE(formats));
7263 
7264 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7265 	if (res)
7266 		return res;
7267 
7268 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7269 				       &dm_plane_funcs, formats, num_formats,
7270 				       modifiers, plane->type, NULL);
7271 	kfree(modifiers);
7272 	if (res)
7273 		return res;
7274 
7275 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7276 	    plane_cap && plane_cap->per_pixel_alpha) {
7277 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7278 					  BIT(DRM_MODE_BLEND_PREMULTI);
7279 
7280 		drm_plane_create_alpha_property(plane);
7281 		drm_plane_create_blend_mode_property(plane, blend_caps);
7282 	}
7283 
7284 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7285 	    plane_cap &&
7286 	    (plane_cap->pixel_format_support.nv12 ||
7287 	     plane_cap->pixel_format_support.p010)) {
7288 		/* This only affects YUV formats. */
7289 		drm_plane_create_color_properties(
7290 			plane,
7291 			BIT(DRM_COLOR_YCBCR_BT601) |
7292 			BIT(DRM_COLOR_YCBCR_BT709) |
7293 			BIT(DRM_COLOR_YCBCR_BT2020),
7294 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7295 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7296 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7297 	}
7298 
7299 	supported_rotations =
7300 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7301 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7302 
7303 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7304 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7305 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7306 						   supported_rotations);
7307 
7308 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7309 
7310 	/* Create (reset) the plane state */
7311 	if (plane->funcs->reset)
7312 		plane->funcs->reset(plane);
7313 
7314 	return 0;
7315 }
7316 
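/*
 * Allocate a cursor plane and an amdgpu_crtc, initialize the CRTC with
 * the given primary plane plus the cursor plane, and enable DRM color
 * management on it.
 */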
7317 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7318 			       struct drm_plane *plane,
7319 			       uint32_t crtc_index)
7320 {
7321 	struct amdgpu_crtc *acrtc = NULL;
7322 	struct drm_plane *cursor_plane;
7323 
7324 	int res = -ENOMEM;
7325 
7326 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7327 	if (!cursor_plane)
7328 		goto fail;
7329 
7330 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7332 
7333 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7334 	if (!acrtc)
7335 		goto fail;
7336 
7337 	res = drm_crtc_init_with_planes(
7338 			dm->ddev,
7339 			&acrtc->base,
7340 			plane,
7341 			cursor_plane,
7342 			&amdgpu_dm_crtc_funcs, NULL);
7343 
7344 	if (res)
7345 		goto fail;
7346 
7347 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7348 
	/* Create (reset) the CRTC state */
7350 	if (acrtc->base.funcs->reset)
7351 		acrtc->base.funcs->reset(&acrtc->base);
7352 
7353 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7354 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7355 
7356 	acrtc->crtc_id = crtc_index;
7357 	acrtc->base.enabled = false;
7358 	acrtc->otg_inst = -1;
7359 
7360 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7361 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7362 				   true, MAX_COLOR_LUT_ENTRIES);
7363 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7364 
7365 	return 0;
7366 
7367 fail:
7368 	kfree(acrtc);
7369 	kfree(cursor_plane);
7370 	return res;
7371 }
7372 
7373 
7374 static int to_drm_connector_type(enum signal_type st)
7375 {
7376 	switch (st) {
7377 	case SIGNAL_TYPE_HDMI_TYPE_A:
7378 		return DRM_MODE_CONNECTOR_HDMIA;
7379 	case SIGNAL_TYPE_EDP:
7380 		return DRM_MODE_CONNECTOR_eDP;
7381 	case SIGNAL_TYPE_LVDS:
7382 		return DRM_MODE_CONNECTOR_LVDS;
7383 	case SIGNAL_TYPE_RGB:
7384 		return DRM_MODE_CONNECTOR_VGA;
7385 	case SIGNAL_TYPE_DISPLAY_PORT:
7386 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7387 		return DRM_MODE_CONNECTOR_DisplayPort;
7388 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7389 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7390 		return DRM_MODE_CONNECTOR_DVID;
7391 	case SIGNAL_TYPE_VIRTUAL:
7392 		return DRM_MODE_CONNECTOR_VIRTUAL;
7393 
7394 	default:
7395 		return DRM_MODE_CONNECTOR_Unknown;
7396 	}
7397 }
7398 
7399 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7400 {
7401 	struct drm_encoder *encoder;
7402 
7403 	/* There is only one encoder per connector */
7404 	drm_connector_for_each_possible_encoder(connector, encoder)
7405 		return encoder;
7406 
7407 	return NULL;
7408 }
7409 
7410 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7411 {
7412 	struct drm_encoder *encoder;
7413 	struct amdgpu_encoder *amdgpu_encoder;
7414 
7415 	encoder = amdgpu_dm_connector_to_encoder(connector);
7416 
7417 	if (encoder == NULL)
7418 		return;
7419 
7420 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7421 
7422 	amdgpu_encoder->native_mode.clock = 0;
7423 
7424 	if (!list_empty(&connector->probed_modes)) {
7425 		struct drm_display_mode *preferred_mode = NULL;
7426 
7427 		list_for_each_entry(preferred_mode,
7428 				    &connector->probed_modes,
7429 				    head) {
7430 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7431 				amdgpu_encoder->native_mode = *preferred_mode;
7432 
7433 			break;
7434 		}
7435 
7436 	}
7437 }
7438 
7439 static struct drm_display_mode *
7440 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7441 			     char *name,
7442 			     int hdisplay, int vdisplay)
7443 {
7444 	struct drm_device *dev = encoder->dev;
7445 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7446 	struct drm_display_mode *mode = NULL;
7447 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7448 
7449 	mode = drm_mode_duplicate(dev, native_mode);
7450 
7451 	if (mode == NULL)
7452 		return NULL;
7453 
7454 	mode->hdisplay = hdisplay;
7455 	mode->vdisplay = vdisplay;
7456 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7457 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7458 
7459 	return mode;
7461 }
7462 
7463 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7464 						 struct drm_connector *connector)
7465 {
7466 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7467 	struct drm_display_mode *mode = NULL;
7468 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7469 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7470 				to_amdgpu_dm_connector(connector);
7471 	int i;
7472 	int n;
7473 	struct mode_size {
7474 		char name[DRM_DISPLAY_MODE_LEN];
7475 		int w;
7476 		int h;
7477 	} common_modes[] = {
7478 		{  "640x480",  640,  480},
7479 		{  "800x600",  800,  600},
7480 		{ "1024x768", 1024,  768},
7481 		{ "1280x720", 1280,  720},
7482 		{ "1280x800", 1280,  800},
7483 		{"1280x1024", 1280, 1024},
7484 		{ "1440x900", 1440,  900},
7485 		{"1680x1050", 1680, 1050},
7486 		{"1600x1200", 1600, 1200},
7487 		{"1920x1080", 1920, 1080},
7488 		{"1920x1200", 1920, 1200}
7489 	};
7490 
7491 	n = ARRAY_SIZE(common_modes);
7492 
7493 	for (i = 0; i < n; i++) {
7494 		struct drm_display_mode *curmode = NULL;
7495 		bool mode_existed = false;
7496 
7497 		if (common_modes[i].w > native_mode->hdisplay ||
7498 		    common_modes[i].h > native_mode->vdisplay ||
7499 		   (common_modes[i].w == native_mode->hdisplay &&
7500 		    common_modes[i].h == native_mode->vdisplay))
7501 			continue;
7502 
7503 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7504 			if (common_modes[i].w == curmode->hdisplay &&
7505 			    common_modes[i].h == curmode->vdisplay) {
7506 				mode_existed = true;
7507 				break;
7508 			}
7509 		}
7510 
7511 		if (mode_existed)
7512 			continue;
7513 
7514 		mode = amdgpu_dm_create_common_mode(encoder,
7515 				common_modes[i].name, common_modes[i].w,
7516 				common_modes[i].h);
7517 		drm_mode_probed_add(connector, mode);
7518 		amdgpu_dm_connector->num_modes++;
7519 	}
7520 }
7521 
7522 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7523 					      struct edid *edid)
7524 {
7525 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7526 			to_amdgpu_dm_connector(connector);
7527 
7528 	if (edid) {
7529 		/* empty probed_modes */
7530 		INIT_LIST_HEAD(&connector->probed_modes);
7531 		amdgpu_dm_connector->num_modes =
7532 				drm_add_edid_modes(connector, edid);
7533 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list may have a higher, preferred
		 * resolution: for example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a later DID
		 * extension block.
		 */
7542 		drm_mode_sort(&connector->probed_modes);
7543 		amdgpu_dm_get_native_mode(connector);
7544 
7545 		/* Freesync capabilities are reset by calling
7546 		 * drm_add_edid_modes() and need to be
7547 		 * restored here.
7548 		 */
7549 		amdgpu_dm_update_freesync_caps(connector, edid);
7550 	} else {
7551 		amdgpu_dm_connector->num_modes = 0;
7552 	}
7553 }
7554 
7555 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7556 			      struct drm_display_mode *mode)
7557 {
7558 	struct drm_display_mode *m;
7559 
	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7561 		if (drm_mode_equal(m, mode))
7562 			return true;
7563 	}
7564 
7565 	return false;
7566 }
7567 
7568 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7569 {
7570 	const struct drm_display_mode *m;
7571 	struct drm_display_mode *new_mode;
7572 	uint i;
7573 	uint32_t new_modes_count = 0;
7574 
7575 	/* Standard FPS values
7576 	 *
7577 	 * 23.976   - TV/NTSC
7578 	 * 24 	    - Cinema
7579 	 * 25 	    - TV/PAL
7580 	 * 29.97    - TV/NTSC
7581 	 * 30 	    - TV/NTSC
7582 	 * 48 	    - Cinema HFR
7583 	 * 50 	    - TV/PAL
7584 	 * 60 	    - Commonly used
7585 	 * 48,72,96 - Multiples of 24
7586 	 */
7587 	static const uint32_t common_rates[] = {
7588 		23976, 24000, 25000, 29970, 30000,
7589 		48000, 50000, 60000, 72000, 96000
7590 	};
7591 
7592 	/*
7593 	 * Find mode with highest refresh rate with the same resolution
7594 	 * as the preferred mode. Some monitors report a preferred mode
7595 	 * with lower resolution than the highest refresh rate supported.
7596 	 */
7597 
7598 	m = get_highest_refresh_rate_mode(aconnector, true);
7599 	if (!m)
7600 		return 0;
7601 
7602 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7603 		uint64_t target_vtotal, target_vtotal_diff;
7604 		uint64_t num, den;
7605 
7606 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7607 			continue;
7608 
7609 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7610 		    common_rates[i] > aconnector->max_vfreq * 1000)
7611 			continue;
7612 
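		/*
		 * With refresh = clock(kHz) * 1000 / (htotal * vtotal)
		 * and the target rate in mHz, the vtotal that hits the
		 * target rate is clock * 10^6 / (rate * htotal).
		 */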
7613 		num = (unsigned long long)m->clock * 1000 * 1000;
7614 		den = common_rates[i] * (unsigned long long)m->htotal;
7615 		target_vtotal = div_u64(num, den);
7616 		target_vtotal_diff = target_vtotal - m->vtotal;
7617 
7618 		/* Check for illegal modes */
7619 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7620 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7621 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7622 			continue;
7623 
7624 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7625 		if (!new_mode)
7626 			goto out;
7627 
7628 		new_mode->vtotal += (u16)target_vtotal_diff;
7629 		new_mode->vsync_start += (u16)target_vtotal_diff;
7630 		new_mode->vsync_end += (u16)target_vtotal_diff;
7631 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7632 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7633 
7634 		if (!is_duplicate_mode(aconnector, new_mode)) {
7635 			drm_mode_probed_add(&aconnector->base, new_mode);
7636 			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7639 	}
7640  out:
7641 	return new_modes_count;
7642 }
7643 
7644 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7645 						   struct edid *edid)
7646 {
7647 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7648 		to_amdgpu_dm_connector(connector);
7649 
7650 	if (!(amdgpu_freesync_vid_mode && edid))
7651 		return;
7652 
7653 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7654 		amdgpu_dm_connector->num_modes +=
7655 			add_fs_modes(amdgpu_dm_connector);
7656 }
7657 
7658 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7659 {
7660 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7661 			to_amdgpu_dm_connector(connector);
7662 	struct drm_encoder *encoder;
7663 	struct edid *edid = amdgpu_dm_connector->edid;
7664 
7665 	encoder = amdgpu_dm_connector_to_encoder(connector);
7666 
7667 	if (!drm_edid_is_valid(edid)) {
7668 		amdgpu_dm_connector->num_modes =
7669 				drm_add_modes_noedid(connector, 640, 480);
7670 	} else {
7671 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7672 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7673 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7674 	}
7675 	amdgpu_dm_fbc_init(connector);
7676 
7677 	return amdgpu_dm_connector->num_modes;
7678 }
7679 
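/*
 * Fill in the common DRM connector defaults for an amdgpu_dm connector
 * and attach the scaling, underscan, max bpc, ABM, HDR metadata, VRR and
 * (optionally) HDCP content protection properties that apply to the
 * connector type.
 */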
7680 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7681 				     struct amdgpu_dm_connector *aconnector,
7682 				     int connector_type,
7683 				     struct dc_link *link,
7684 				     int link_index)
7685 {
7686 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7687 
7688 	/*
7689 	 * Some of the properties below require access to state, like bpc.
7690 	 * Allocate some default initial connector state with our reset helper.
7691 	 */
7692 	if (aconnector->base.funcs->reset)
7693 		aconnector->base.funcs->reset(&aconnector->base);
7694 
7695 	aconnector->connector_id = link_index;
7696 	aconnector->dc_link = link;
7697 	aconnector->base.interlace_allowed = false;
7698 	aconnector->base.doublescan_allowed = false;
7699 	aconnector->base.stereo_allowed = false;
7700 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7701 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7702 	aconnector->audio_inst = -1;
7703 	mutex_init(&aconnector->hpd_lock);
7704 
	/*
	 * Configure HPD hot plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
7709 	switch (connector_type) {
7710 	case DRM_MODE_CONNECTOR_HDMIA:
7711 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
7714 		break;
7715 	case DRM_MODE_CONNECTOR_DisplayPort:
7716 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
7719 		break;
7720 	case DRM_MODE_CONNECTOR_DVID:
7721 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7722 		break;
7723 	default:
7724 		break;
7725 	}
7726 
7727 	drm_object_attach_property(&aconnector->base.base,
7728 				dm->ddev->mode_config.scaling_mode_property,
7729 				DRM_MODE_SCALE_NONE);
7730 
7731 	drm_object_attach_property(&aconnector->base.base,
7732 				adev->mode_info.underscan_property,
7733 				UNDERSCAN_OFF);
7734 	drm_object_attach_property(&aconnector->base.base,
7735 				adev->mode_info.underscan_hborder_property,
7736 				0);
7737 	drm_object_attach_property(&aconnector->base.base,
7738 				adev->mode_info.underscan_vborder_property,
7739 				0);
7740 
7741 	if (!aconnector->mst_port)
7742 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7743 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7745 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7746 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7747 
7748 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7749 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7750 		drm_object_attach_property(&aconnector->base.base,
7751 				adev->mode_info.abm_level_property, 0);
7752 	}
7753 
7754 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7755 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7756 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7757 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7758 
7759 		if (!aconnector->mst_port)
7760 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7761 
7762 #ifdef CONFIG_DRM_AMD_DC_HDCP
7763 		if (adev->dm.hdcp_workqueue)
7764 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7765 #endif
7766 	}
7767 }
7768 
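/*
 * Translate an array of i2c_msg into a DC i2c_command and submit it on
 * the connector's DDC channel; returns the number of messages on success
 * or -EIO on failure.
 */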
7769 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7770 			      struct i2c_msg *msgs, int num)
7771 {
7772 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7773 	struct ddc_service *ddc_service = i2c->ddc_service;
7774 	struct i2c_command cmd;
7775 	int i;
7776 	int result = -EIO;
7777 
7778 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7779 
7780 	if (!cmd.payloads)
7781 		return result;
7782 
7783 	cmd.number_of_payloads = num;
7784 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7785 	cmd.speed = 100;
7786 
7787 	for (i = 0; i < num; i++) {
7788 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7789 		cmd.payloads[i].address = msgs[i].addr;
7790 		cmd.payloads[i].length = msgs[i].len;
7791 		cmd.payloads[i].data = msgs[i].buf;
7792 	}
7793 
7794 	if (dc_submit_i2c(
7795 			ddc_service->ctx->dc,
7796 			ddc_service->ddc_pin->hw_info.ddc_channel,
7797 			&cmd))
7798 		result = num;
7799 
7800 	kfree(cmd.payloads);
7801 	return result;
7802 }
7803 
7804 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7805 {
7806 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7807 }
7808 
7809 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7810 	.master_xfer = amdgpu_dm_i2c_xfer,
7811 	.functionality = amdgpu_dm_i2c_func,
7812 };
7813 
7814 static struct amdgpu_i2c_adapter *
7815 create_i2c(struct ddc_service *ddc_service,
7816 	   int link_index,
7817 	   int *res)
7818 {
7819 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7820 	struct amdgpu_i2c_adapter *i2c;
7821 
7822 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7823 	if (!i2c)
7824 		return NULL;
7825 	i2c->base.owner = THIS_MODULE;
7826 	i2c->base.class = I2C_CLASS_DDC;
7827 	i2c->base.dev.parent = &adev->pdev->dev;
7828 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7829 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7830 	i2c_set_adapdata(&i2c->base, i2c);
7831 	i2c->ddc_service = ddc_service;
7832 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7833 
7834 	return i2c;
7835 }
7836 
7837 
7838 /*
7839  * Note: this function assumes that dc_link_detect() was called for the
7840  * dc_link which will be represented by this aconnector.
7841  */
7842 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7843 				    struct amdgpu_dm_connector *aconnector,
7844 				    uint32_t link_index,
7845 				    struct amdgpu_encoder *aencoder)
7846 {
7847 	int res = 0;
7848 	int connector_type;
7849 	struct dc *dc = dm->dc;
7850 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7851 	struct amdgpu_i2c_adapter *i2c;
7852 
7853 	link->priv = aconnector;
7854 
7855 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7856 
7857 	i2c = create_i2c(link->ddc, link->link_index, &res);
7858 	if (!i2c) {
7859 		DRM_ERROR("Failed to create i2c adapter data\n");
7860 		return -ENOMEM;
7861 	}
7862 
7863 	aconnector->i2c = i2c;
7864 	res = i2c_add_adapter(&i2c->base);
7865 
7866 	if (res) {
7867 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7868 		goto out_free;
7869 	}
7870 
7871 	connector_type = to_drm_connector_type(link->connector_signal);
7872 
7873 	res = drm_connector_init_with_ddc(
7874 			dm->ddev,
7875 			&aconnector->base,
7876 			&amdgpu_dm_connector_funcs,
7877 			connector_type,
7878 			&i2c->base);
7879 
7880 	if (res) {
7881 		DRM_ERROR("connector_init failed\n");
7882 		aconnector->connector_id = -1;
7883 		goto out_free;
7884 	}
7885 
7886 	drm_connector_helper_add(
7887 			&aconnector->base,
7888 			&amdgpu_dm_connector_helper_funcs);
7889 
7890 	amdgpu_dm_connector_init_helper(
7891 		dm,
7892 		aconnector,
7893 		connector_type,
7894 		link,
7895 		link_index);
7896 
7897 	drm_connector_attach_encoder(
7898 		&aconnector->base, &aencoder->base);
7899 
7900 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7901 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7902 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7903 
7904 out_free:
7905 	if (res) {
7906 		kfree(i2c);
7907 		aconnector->i2c = NULL;
7908 	}
7909 	return res;
7910 }
7911 
7912 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7913 {
7914 	switch (adev->mode_info.num_crtc) {
7915 	case 1:
7916 		return 0x1;
7917 	case 2:
7918 		return 0x3;
7919 	case 3:
7920 		return 0x7;
7921 	case 4:
7922 		return 0xf;
7923 	case 5:
7924 		return 0x1f;
7925 	case 6:
7926 	default:
7927 		return 0x3f;
7928 	}
7929 }
7930 
7931 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7932 				  struct amdgpu_encoder *aencoder,
7933 				  uint32_t link_index)
7934 {
7935 	struct amdgpu_device *adev = drm_to_adev(dev);
7936 
7937 	int res = drm_encoder_init(dev,
7938 				   &aencoder->base,
7939 				   &amdgpu_dm_encoder_funcs,
7940 				   DRM_MODE_ENCODER_TMDS,
7941 				   NULL);
7942 
7943 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7944 
7945 	if (!res)
7946 		aencoder->encoder_id = link_index;
7947 	else
7948 		aencoder->encoder_id = -1;
7949 
7950 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7951 
7952 	return res;
7953 }
7954 
7955 static void manage_dm_interrupts(struct amdgpu_device *adev,
7956 				 struct amdgpu_crtc *acrtc,
7957 				 bool enable)
7958 {
7959 	/*
7960 	 * We have no guarantee that the frontend index maps to the same
7961 	 * backend index - some even map to more than one.
7962 	 *
7963 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7964 	 */
7965 	int irq_type =
7966 		amdgpu_display_crtc_idx_to_irq_type(
7967 			adev,
7968 			acrtc->crtc_id);
7969 
7970 	if (enable) {
7971 		drm_crtc_vblank_on(&acrtc->base);
7972 		amdgpu_irq_get(
7973 			adev,
7974 			&adev->pageflip_irq,
7975 			irq_type);
7976 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7977 		amdgpu_irq_get(
7978 			adev,
7979 			&adev->vline0_irq,
7980 			irq_type);
7981 #endif
7982 	} else {
7983 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7984 		amdgpu_irq_put(
7985 			adev,
7986 			&adev->vline0_irq,
7987 			irq_type);
7988 #endif
7989 		amdgpu_irq_put(
7990 			adev,
7991 			&adev->pageflip_irq,
7992 			irq_type);
7993 		drm_crtc_vblank_off(&acrtc->base);
7994 	}
7995 }
7996 
7997 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7998 				      struct amdgpu_crtc *acrtc)
7999 {
8000 	int irq_type =
8001 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8002 
	/*
	 * This reads the current state for the IRQ and forces a reapply of
	 * the setting to hardware.
	 */
8007 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8008 }
8009 
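/*
 * Return true when the scaling mode or the effective underscan
 * configuration differs between the two connector states.
 */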
8010 static bool
8011 is_scaling_state_different(const struct dm_connector_state *dm_state,
8012 			   const struct dm_connector_state *old_dm_state)
8013 {
8014 	if (dm_state->scaling != old_dm_state->scaling)
8015 		return true;
8016 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8017 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8018 			return true;
8019 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8020 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8021 			return true;
8022 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8023 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8024 		return true;
8025 	return false;
8026 }
8027 
8028 #ifdef CONFIG_DRM_AMD_DC_HDCP
8029 static bool is_content_protection_different(struct drm_connector_state *state,
8030 					    const struct drm_connector_state *old_state,
8031 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8032 {
8033 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8034 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8035 
8036 	/* Handle: Type0/1 change */
8037 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8038 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8039 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8040 		return true;
8041 	}
8042 
	/* CP is being re-enabled, ignore this
8044 	 *
8045 	 * Handles:	ENABLED -> DESIRED
8046 	 */
8047 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8048 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8049 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8050 		return false;
8051 	}
8052 
8053 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8054 	 *
8055 	 * Handles:	UNDESIRED -> ENABLED
8056 	 */
8057 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8058 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8059 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8060 
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
8063 	 *
8064 	 * Handles:	DESIRED -> DESIRED (Special case)
8065 	 */
8066 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8067 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8068 		dm_con_state->update_hdcp = false;
8069 		return true;
8070 	}
8071 
8072 	/*
8073 	 * Handles:	UNDESIRED -> UNDESIRED
8074 	 *		DESIRED -> DESIRED
8075 	 *		ENABLED -> ENABLED
8076 	 */
8077 	if (old_state->content_protection == state->content_protection)
8078 		return false;
8079 
8080 	/*
8081 	 * Handles:	UNDESIRED -> DESIRED
8082 	 *		DESIRED -> UNDESIRED
8083 	 *		ENABLED -> UNDESIRED
8084 	 */
8085 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8086 		return true;
8087 
8088 	/*
8089 	 * Handles:	DESIRED -> ENABLED
8090 	 */
8091 	return false;
8092 }
8093 
8094 #endif
8095 static void remove_stream(struct amdgpu_device *adev,
8096 			  struct amdgpu_crtc *acrtc,
8097 			  struct dc_stream_state *stream)
8098 {
	/* This is the update mode case: just mark the CRTC as disabled. */
8100 
8101 	acrtc->otg_inst = -1;
8102 	acrtc->enabled = false;
8103 }
8104 
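/*
 * Compute the DC cursor position for the plane on the CRTC. Negative
 * coordinates are clamped to zero and folded into the hotspot so the
 * cursor can slide off the top/left edge of the screen.
 */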
8105 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8106 			       struct dc_cursor_position *position)
8107 {
8108 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8109 	int x, y;
8110 	int xorigin = 0, yorigin = 0;
8111 
8112 	if (!crtc || !plane->state->fb)
8113 		return 0;
8114 
8115 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8116 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8117 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8118 			  __func__,
8119 			  plane->state->crtc_w,
8120 			  plane->state->crtc_h);
8121 		return -EINVAL;
8122 	}
8123 
8124 	x = plane->state->crtc_x;
8125 	y = plane->state->crtc_y;
8126 
8127 	if (x <= -amdgpu_crtc->max_cursor_width ||
8128 	    y <= -amdgpu_crtc->max_cursor_height)
8129 		return 0;
8130 
8131 	if (x < 0) {
8132 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8133 		x = 0;
8134 	}
8135 	if (y < 0) {
8136 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8137 		y = 0;
8138 	}
8139 	position->enable = true;
8140 	position->translate_by_source = true;
8141 	position->x = x;
8142 	position->y = y;
8143 	position->x_hotspot = xorigin;
8144 	position->y_hotspot = yorigin;
8145 
8146 	return 0;
8147 }
8148 
8149 static void handle_cursor_update(struct drm_plane *plane,
8150 				 struct drm_plane_state *old_plane_state)
8151 {
8152 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8153 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8154 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8155 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8156 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8157 	uint64_t address = afb ? afb->address : 0;
8158 	struct dc_cursor_position position = {0};
8159 	struct dc_cursor_attributes attributes;
8160 	int ret;
8161 
8162 	if (!plane->state->fb && !old_plane_state->fb)
8163 		return;
8164 
	DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
8166 		      __func__,
8167 		      amdgpu_crtc->crtc_id,
8168 		      plane->state->crtc_w,
8169 		      plane->state->crtc_h);
8170 
8171 	ret = get_cursor_position(plane, crtc, &position);
8172 	if (ret)
8173 		return;
8174 
8175 	if (!position.enable) {
8176 		/* turn off cursor */
8177 		if (crtc_state && crtc_state->stream) {
8178 			mutex_lock(&adev->dm.dc_lock);
8179 			dc_stream_set_cursor_position(crtc_state->stream,
8180 						      &position);
8181 			mutex_unlock(&adev->dm.dc_lock);
8182 		}
8183 		return;
8184 	}
8185 
8186 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8187 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8188 
8189 	memset(&attributes, 0, sizeof(attributes));
8190 	attributes.address.high_part = upper_32_bits(address);
8191 	attributes.address.low_part  = lower_32_bits(address);
8192 	attributes.width             = plane->state->crtc_w;
8193 	attributes.height            = plane->state->crtc_h;
8194 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8195 	attributes.rotation_angle    = 0;
8196 	attributes.attribute_flags.value = 0;
8197 
8198 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8199 
8200 	if (crtc_state->stream) {
8201 		mutex_lock(&adev->dm.dc_lock);
8202 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8203 							 &attributes))
8204 			DRM_ERROR("DC failed to set cursor attributes\n");
8205 
8206 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8207 						   &position))
8208 			DRM_ERROR("DC failed to set cursor position\n");
8209 		mutex_unlock(&adev->dm.dc_lock);
8210 	}
8211 }
8212 
8213 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8214 {
8216 	assert_spin_locked(&acrtc->base.dev->event_lock);
8217 	WARN_ON(acrtc->event);
8218 
8219 	acrtc->event = acrtc->base.state->event;
8220 
8221 	/* Set the flip status */
8222 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8223 
8224 	/* Mark this event as consumed */
8225 	acrtc->base.state->event = NULL;
8226 
8227 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8228 		     acrtc->crtc_id);
8229 }
8230 
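/*
 * Refresh the cached VRR parameters around a flip and rebuild the VRR
 * infopacket for the stream, recording whether the timing or the
 * infopacket changed. Runs under the DRM event lock.
 */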
8231 static void update_freesync_state_on_stream(
8232 	struct amdgpu_display_manager *dm,
8233 	struct dm_crtc_state *new_crtc_state,
8234 	struct dc_stream_state *new_stream,
8235 	struct dc_plane_state *surface,
8236 	u32 flip_timestamp_in_us)
8237 {
8238 	struct mod_vrr_params vrr_params;
8239 	struct dc_info_packet vrr_infopacket = {0};
8240 	struct amdgpu_device *adev = dm->adev;
8241 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8242 	unsigned long flags;
8243 	bool pack_sdp_v1_3 = false;
8244 
8245 	if (!new_stream)
8246 		return;
8247 
8248 	/*
8249 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8250 	 * For now it's sufficient to just guard against these conditions.
8251 	 */
8252 
8253 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8254 		return;
8255 
8256 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8258 
8259 	if (surface) {
8260 		mod_freesync_handle_preflip(
8261 			dm->freesync_module,
8262 			surface,
8263 			new_stream,
8264 			flip_timestamp_in_us,
8265 			&vrr_params);
8266 
8267 		if (adev->family < AMDGPU_FAMILY_AI &&
8268 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8269 			mod_freesync_handle_v_update(dm->freesync_module,
8270 						     new_stream, &vrr_params);
8271 
8272 			/* Need to call this before the frame ends. */
8273 			dc_stream_adjust_vmin_vmax(dm->dc,
8274 						   new_crtc_state->stream,
8275 						   &vrr_params.adjust);
8276 		}
8277 	}
8278 
8279 	mod_freesync_build_vrr_infopacket(
8280 		dm->freesync_module,
8281 		new_stream,
8282 		&vrr_params,
8283 		PACKET_TYPE_VRR,
8284 		TRANSFER_FUNC_UNKNOWN,
8285 		&vrr_infopacket,
8286 		pack_sdp_v1_3);
8287 
8288 	new_crtc_state->freesync_timing_changed |=
8289 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8290 			&vrr_params.adjust,
8291 			sizeof(vrr_params.adjust)) != 0);
8292 
8293 	new_crtc_state->freesync_vrr_info_changed |=
8294 		(memcmp(&new_crtc_state->vrr_infopacket,
8295 			&vrr_infopacket,
8296 			sizeof(vrr_infopacket)) != 0);
8297 
8298 	acrtc->dm_irq_params.vrr_params = vrr_params;
8299 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8300 
8301 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8302 	new_stream->vrr_infopacket = vrr_infopacket;
8303 
8304 	if (new_crtc_state->freesync_vrr_info_changed)
8305 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8306 			      new_crtc_state->base.crtc->base.id,
8307 			      (int)new_crtc_state->base.vrr_enabled,
8308 			      (int)vrr_params.state);
8309 
8310 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8311 }
8312 
8313 static void update_stream_irq_parameters(
8314 	struct amdgpu_display_manager *dm,
8315 	struct dm_crtc_state *new_crtc_state)
8316 {
8317 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8318 	struct mod_vrr_params vrr_params;
8319 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8320 	struct amdgpu_device *adev = dm->adev;
8321 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8322 	unsigned long flags;
8323 
8324 	if (!new_stream)
8325 		return;
8326 
8327 	/*
8328 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8329 	 * For now it's sufficient to just guard against these conditions.
8330 	 */
8331 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8332 		return;
8333 
8334 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8335 	vrr_params = acrtc->dm_irq_params.vrr_params;
8336 
8337 	if (new_crtc_state->vrr_supported &&
8338 	    config.min_refresh_in_uhz &&
8339 	    config.max_refresh_in_uhz) {
8340 		/*
8341 		 * if freesync compatible mode was set, config.state will be set
8342 		 * in atomic check
8343 		 */
8344 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8345 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8346 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8347 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8348 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8349 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8350 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8351 		} else {
8352 			config.state = new_crtc_state->base.vrr_enabled ?
8353 						     VRR_STATE_ACTIVE_VARIABLE :
8354 						     VRR_STATE_INACTIVE;
8355 		}
8356 	} else {
8357 		config.state = VRR_STATE_UNSUPPORTED;
8358 	}
8359 
8360 	mod_freesync_build_vrr_params(dm->freesync_module,
8361 				      new_stream,
8362 				      &config, &vrr_params);
8363 
8364 	new_crtc_state->freesync_timing_changed |=
8365 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8366 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8367 
8368 	new_crtc_state->freesync_config = config;
8369 	/* Copy state for access from DM IRQ handler */
8370 	acrtc->dm_irq_params.freesync_config = config;
8371 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8372 	acrtc->dm_irq_params.vrr_params = vrr_params;
8373 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8374 }
8375 
8376 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8377 					    struct dm_crtc_state *new_state)
8378 {
8379 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8380 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8381 
8382 	if (!old_vrr_active && new_vrr_active) {
8383 		/* Transition VRR inactive -> active:
8384 		 * While VRR is active, we must not disable vblank irq, as a
8385 		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it happened inside the display front porch.
8387 		 *
8388 		 * We also need vupdate irq for the actual core vblank handling
8389 		 * at end of vblank.
8390 		 */
8391 		dm_set_vupdate_irq(new_state->base.crtc, true);
8392 		drm_crtc_vblank_get(new_state->base.crtc);
8393 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8394 				 __func__, new_state->base.crtc->base.id);
8395 	} else if (old_vrr_active && !new_vrr_active) {
8396 		/* Transition VRR active -> inactive:
8397 		 * Allow vblank irq disable again for fixed refresh rate.
8398 		 */
8399 		dm_set_vupdate_irq(new_state->base.crtc, false);
8400 		drm_crtc_vblank_put(new_state->base.crtc);
8401 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8402 				 __func__, new_state->base.crtc->base.id);
8403 	}
8404 }
8405 
8406 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8407 {
8408 	struct drm_plane *plane;
8409 	struct drm_plane_state *old_plane_state;
8410 	int i;
8411 
8412 	/*
8413 	 * TODO: Make this per-stream so we don't issue redundant updates for
8414 	 * commits with multiple streams.
8415 	 */
8416 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8417 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8418 			handle_cursor_update(plane, old_plane_state);
8419 }
8420 
8421 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8422 				    struct dc_state *dc_state,
8423 				    struct drm_device *dev,
8424 				    struct amdgpu_display_manager *dm,
8425 				    struct drm_crtc *pcrtc,
8426 				    bool wait_for_vblank)
8427 {
8428 	uint32_t i;
8429 	uint64_t timestamp_ns;
8430 	struct drm_plane *plane;
8431 	struct drm_plane_state *old_plane_state, *new_plane_state;
8432 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8433 	struct drm_crtc_state *new_pcrtc_state =
8434 			drm_atomic_get_new_crtc_state(state, pcrtc);
8435 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8436 	struct dm_crtc_state *dm_old_crtc_state =
8437 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8438 	int planes_count = 0, vpos, hpos;
8439 	long r;
8440 	unsigned long flags;
8441 	struct amdgpu_bo *abo;
8442 	uint32_t target_vblank, last_flip_vblank;
8443 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8444 	bool pflip_present = false;
8445 	struct {
8446 		struct dc_surface_update surface_updates[MAX_SURFACES];
8447 		struct dc_plane_info plane_infos[MAX_SURFACES];
8448 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8449 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8450 		struct dc_stream_update stream_update;
8451 	} *bundle;
8452 
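	/*
	 * The update bundle is heap-allocated: with MAX_SURFACES copies of
	 * several update structs it is almost certainly too large to live on
	 * the kernel stack.
	 */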
8453 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8454 
8455 	if (!bundle) {
8456 		dm_error("Failed to allocate update bundle\n");
8457 		goto cleanup;
8458 	}
8459 
8460 	/*
8461 	 * Disable the cursor first if we're disabling all the planes.
8462 	 * It'll remain on the screen after the planes are re-enabled
8463 	 * if we don't.
8464 	 */
8465 	if (acrtc_state->active_planes == 0)
8466 		amdgpu_dm_commit_cursors(state);
8467 
8468 	/* update planes when needed */
8469 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8470 		struct drm_crtc *crtc = new_plane_state->crtc;
8471 		struct drm_crtc_state *new_crtc_state;
8472 		struct drm_framebuffer *fb = new_plane_state->fb;
8473 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8474 		bool plane_needs_flip;
8475 		struct dc_plane_state *dc_plane;
8476 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8477 
8478 		/* Cursor plane is handled after stream updates */
8479 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8480 			continue;
8481 
8482 		if (!fb || !crtc || pcrtc != crtc)
8483 			continue;
8484 
8485 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8486 		if (!new_crtc_state->active)
8487 			continue;
8488 
8489 		dc_plane = dm_new_plane_state->dc_state;
8490 
8491 		bundle->surface_updates[planes_count].surface = dc_plane;
8492 		if (new_pcrtc_state->color_mgmt_changed) {
8493 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8494 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8495 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8496 		}
8497 
8498 		fill_dc_scaling_info(new_plane_state,
8499 				     &bundle->scaling_infos[planes_count]);
8500 
8501 		bundle->surface_updates[planes_count].scaling_info =
8502 			&bundle->scaling_infos[planes_count];
8503 
8504 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8505 
8506 		pflip_present = pflip_present || plane_needs_flip;
8507 
8508 		if (!plane_needs_flip) {
8509 			planes_count += 1;
8510 			continue;
8511 		}
8512 
8513 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8514 
8515 		/*
8516 		 * Wait for all fences on this FB. Do limited wait to avoid
8517 		 * deadlock during GPU reset when this fence will not signal
8518 		 * but we hold reservation lock for the BO.
8519 		 */
8520 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8521 					  msecs_to_jiffies(5000));
8522 		if (unlikely(r <= 0))
8523 			DRM_ERROR("Waiting for fences timed out!");
8524 
8525 		fill_dc_plane_info_and_addr(
8526 			dm->adev, new_plane_state,
8527 			afb->tiling_flags,
8528 			&bundle->plane_infos[planes_count],
8529 			&bundle->flip_addrs[planes_count].address,
8530 			afb->tmz_surface, false);
8531 
8532 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8533 				 new_plane_state->plane->index,
8534 				 bundle->plane_infos[planes_count].dcc.enable);
8535 
8536 		bundle->surface_updates[planes_count].plane_info =
8537 			&bundle->plane_infos[planes_count];
8538 
8539 		/*
8540 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
8542 		 */
8543 		bundle->flip_addrs[planes_count].flip_immediate =
8544 			crtc->state->async_flip &&
8545 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8546 
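		/* DC expects the flip timestamp in microseconds, so convert from ns. */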
8547 		timestamp_ns = ktime_get_ns();
8548 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8549 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8550 		bundle->surface_updates[planes_count].surface = dc_plane;
8551 
8552 		if (!bundle->surface_updates[planes_count].surface) {
8553 			DRM_ERROR("No surface for CRTC: id=%d\n",
8554 					acrtc_attach->crtc_id);
8555 			continue;
8556 		}
8557 
8558 		if (plane == pcrtc->primary)
8559 			update_freesync_state_on_stream(
8560 				dm,
8561 				acrtc_state,
8562 				acrtc_state->stream,
8563 				dc_plane,
8564 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8565 
8566 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8567 				 __func__,
8568 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8569 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8570 
8571 		planes_count += 1;
8572 
8573 	}
8574 
8575 	if (pflip_present) {
8576 		if (!vrr_active) {
8577 			/* Use old throttling in non-vrr fixed refresh rate mode
8578 			 * to keep flip scheduling based on target vblank counts
8579 			 * working in a backwards compatible way, e.g., for
8580 			 * clients using the GLX_OML_sync_control extension or
8581 			 * DRI3/Present extension with defined target_msc.
8582 			 */
8583 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8584 		}
8585 		else {
8586 			/* For variable refresh rate mode only:
8587 			 * Get vblank of last completed flip to avoid > 1 vrr
8588 			 * flips per video frame by use of throttling, but allow
8589 			 * flip programming anywhere in the possibly large
8590 			 * variable vrr vblank interval for fine-grained flip
8591 			 * timing control and more opportunity to avoid stutter
8592 			 * on late submission of flips.
8593 			 */
8594 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8595 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8596 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8597 		}
8598 
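		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank count of the last completed flip (no throttling) or
		 * the one right after it.
		 */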
8599 		target_vblank = last_flip_vblank + wait_for_vblank;
8600 
8601 		/*
8602 		 * Wait until we're out of the vertical blank period before the one
8603 		 * targeted by the flip
8604 		 */
8605 		while ((acrtc_attach->enabled &&
8606 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8607 							    0, &vpos, &hpos, NULL,
8608 							    NULL, &pcrtc->hwmode)
8609 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8610 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8611 			(int)(target_vblank -
8612 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8613 			usleep_range(1000, 1100);
8614 		}
8615 
8616 		/**
8617 		 * Prepare the flip event for the pageflip interrupt to handle.
8618 		 *
8619 		 * This only works in the case where we've already turned on the
8620 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8621 		 * from 0 -> n planes we have to skip a hardware generated event
8622 		 * and rely on sending it from software.
8623 		 */
8624 		if (acrtc_attach->base.state->event &&
8625 		    acrtc_state->active_planes > 0) {
8626 			drm_crtc_vblank_get(pcrtc);
8627 
8628 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8629 
8630 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8631 			prepare_flip_isr(acrtc_attach);
8632 
8633 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8634 		}
8635 
8636 		if (acrtc_state->stream) {
8637 			if (acrtc_state->freesync_vrr_info_changed)
8638 				bundle->stream_update.vrr_infopacket =
8639 					&acrtc_state->stream->vrr_infopacket;
8640 		}
8641 	}
8642 
8643 	/* Update the planes if changed or disable if we don't have any. */
8644 	if ((planes_count || acrtc_state->active_planes == 0) &&
8645 		acrtc_state->stream) {
8646 #if defined(CONFIG_DRM_AMD_DC_DCN)
8647 		/*
8648 		 * If PSR or idle optimizations are enabled then flush out
8649 		 * any pending work before hardware programming.
8650 		 */
8651 		flush_workqueue(dm->vblank_control_workqueue);
8652 #endif
8653 
8654 		bundle->stream_update.stream = acrtc_state->stream;
8655 		if (new_pcrtc_state->mode_changed) {
8656 			bundle->stream_update.src = acrtc_state->stream->src;
8657 			bundle->stream_update.dst = acrtc_state->stream->dst;
8658 		}
8659 
8660 		if (new_pcrtc_state->color_mgmt_changed) {
8661 			/*
8662 			 * TODO: This isn't fully correct since we've actually
8663 			 * already modified the stream in place.
8664 			 */
8665 			bundle->stream_update.gamut_remap =
8666 				&acrtc_state->stream->gamut_remap_matrix;
8667 			bundle->stream_update.output_csc_transform =
8668 				&acrtc_state->stream->csc_color_matrix;
8669 			bundle->stream_update.out_transfer_func =
8670 				acrtc_state->stream->out_transfer_func;
8671 		}
8672 
8673 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8674 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8675 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8676 
8677 		/*
8678 		 * If FreeSync state on the stream has changed then we need to
8679 		 * re-adjust the min/max bounds now that DC doesn't handle this
8680 		 * as part of commit.
8681 		 */
8682 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8683 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8684 			dc_stream_adjust_vmin_vmax(
8685 				dm->dc, acrtc_state->stream,
8686 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8687 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8688 		}
8689 		mutex_lock(&dm->dc_lock);
8690 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8691 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8692 			amdgpu_dm_psr_disable(acrtc_state->stream);
8693 
		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);
8700 
8701 		/**
8702 		 * Enable or disable the interrupts on the backend.
8703 		 *
8704 		 * Most pipes are put into power gating when unused.
8705 		 *
8706 		 * When power gating is enabled on a pipe we lose the
8707 		 * interrupt enablement state when power gating is disabled.
8708 		 *
8709 		 * So we need to update the IRQ control state in hardware
8710 		 * whenever the pipe turns on (since it could be previously
8711 		 * power gated) or off (since some pipes can't be power gated
8712 		 * on some ASICs).
8713 		 */
8714 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8715 			dm_update_pflip_irq_state(drm_to_adev(dev),
8716 						  acrtc_attach);
8717 
8718 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8719 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8720 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8721 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8722 
8723 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
8724 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8725 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8726 			struct amdgpu_dm_connector *aconn =
8727 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8728 
8729 			if (aconn->psr_skip_count > 0)
8730 				aconn->psr_skip_count--;
8731 
8732 			/* Allow PSR when skip count is 0. */
8733 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8734 		} else {
8735 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
8736 		}
8737 
8738 		mutex_unlock(&dm->dc_lock);
8739 	}
8740 
8741 	/*
8742 	 * Update cursor state *after* programming all the planes.
8743 	 * This avoids redundant programming in the case where we're going
8744 	 * to be disabling a single plane - those pipes are being disabled.
8745 	 */
8746 	if (acrtc_state->active_planes)
8747 		amdgpu_dm_commit_cursors(state);
8748 
8749 cleanup:
8750 	kfree(bundle);
8751 }
8752 
8753 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8754 				   struct drm_atomic_state *state)
8755 {
8756 	struct amdgpu_device *adev = drm_to_adev(dev);
8757 	struct amdgpu_dm_connector *aconnector;
8758 	struct drm_connector *connector;
8759 	struct drm_connector_state *old_con_state, *new_con_state;
8760 	struct drm_crtc_state *new_crtc_state;
8761 	struct dm_crtc_state *new_dm_crtc_state;
8762 	const struct dc_stream_status *status;
8763 	int i, inst;
8764 
8765 	/* Notify device removals. */
8766 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8767 		if (old_con_state->crtc != new_con_state->crtc) {
8768 			/* CRTC changes require notification. */
8769 			goto notify;
8770 		}
8771 
8772 		if (!new_con_state->crtc)
8773 			continue;
8774 
8775 		new_crtc_state = drm_atomic_get_new_crtc_state(
8776 			state, new_con_state->crtc);
8777 
8778 		if (!new_crtc_state)
8779 			continue;
8780 
8781 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8782 			continue;
8783 
8784 	notify:
8785 		aconnector = to_amdgpu_dm_connector(connector);
8786 
8787 		mutex_lock(&adev->dm.audio_lock);
8788 		inst = aconnector->audio_inst;
8789 		aconnector->audio_inst = -1;
8790 		mutex_unlock(&adev->dm.audio_lock);
8791 
8792 		amdgpu_dm_audio_eld_notify(adev, inst);
8793 	}
8794 
8795 	/* Notify audio device additions. */
8796 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8797 		if (!new_con_state->crtc)
8798 			continue;
8799 
8800 		new_crtc_state = drm_atomic_get_new_crtc_state(
8801 			state, new_con_state->crtc);
8802 
8803 		if (!new_crtc_state)
8804 			continue;
8805 
8806 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8807 			continue;
8808 
8809 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8810 		if (!new_dm_crtc_state->stream)
8811 			continue;
8812 
8813 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8814 		if (!status)
8815 			continue;
8816 
8817 		aconnector = to_amdgpu_dm_connector(connector);
8818 
8819 		mutex_lock(&adev->dm.audio_lock);
8820 		inst = status->audio_inst;
8821 		aconnector->audio_inst = inst;
8822 		mutex_unlock(&adev->dm.audio_lock);
8823 
8824 		amdgpu_dm_audio_eld_notify(adev, inst);
8825 	}
8826 }
8827 
8828 /*
8829  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8830  * @crtc_state: the DRM CRTC state
8831  * @stream_state: the DC stream state.
8832  *
8833  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8834  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8835  */
8836 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8837 						struct dc_stream_state *stream_state)
8838 {
8839 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8840 }
8841 
8842 /**
 * amdgpu_dm_atomic_commit_tail() - AMDGPU DM's commit tail implementation.
8844  * @state: The atomic state to commit
8845  *
8846  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
8848  * atomic check should have filtered anything non-kosher.
8849  */
8850 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8851 {
8852 	struct drm_device *dev = state->dev;
8853 	struct amdgpu_device *adev = drm_to_adev(dev);
8854 	struct amdgpu_display_manager *dm = &adev->dm;
8855 	struct dm_atomic_state *dm_state;
8856 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8857 	uint32_t i, j;
8858 	struct drm_crtc *crtc;
8859 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8860 	unsigned long flags;
8861 	bool wait_for_vblank = true;
8862 	struct drm_connector *connector;
8863 	struct drm_connector_state *old_con_state, *new_con_state;
8864 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8865 	int crtc_disable_count = 0;
8866 	bool mode_set_reset_required = false;
8867 
8868 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8869 
8870 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8871 
8872 	dm_state = dm_atomic_get_new_state(state);
8873 	if (dm_state && dm_state->context) {
8874 		dc_state = dm_state->context;
8875 	} else {
8876 		/* No state changes, retain current state. */
8877 		dc_state_temp = dc_create_state(dm->dc);
8878 		ASSERT(dc_state_temp);
8879 		dc_state = dc_state_temp;
8880 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8881 	}
8882 
8883 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8884 				       new_crtc_state, i) {
8885 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8886 
8887 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8888 
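		/*
		 * Disable interrupts before releasing the stream so the IRQ
		 * handlers stop touching state that is about to go away; the
		 * release pairs with the dc_stream_retain() taken when the
		 * CRTC was enabled further below.
		 */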
8889 		if (old_crtc_state->active &&
8890 		    (!new_crtc_state->active ||
8891 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8892 			manage_dm_interrupts(adev, acrtc, false);
8893 			dc_stream_release(dm_old_crtc_state->stream);
8894 		}
8895 	}
8896 
8897 	drm_atomic_helper_calc_timestamping_constants(state);
8898 
8899 	/* update changed items */
8900 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8901 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8902 
8903 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8904 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8905 
8906 		DRM_DEBUG_ATOMIC(
8907 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8908 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8909 			"connectors_changed:%d\n",
8910 			acrtc->crtc_id,
8911 			new_crtc_state->enable,
8912 			new_crtc_state->active,
8913 			new_crtc_state->planes_changed,
8914 			new_crtc_state->mode_changed,
8915 			new_crtc_state->active_changed,
8916 			new_crtc_state->connectors_changed);
8917 
8918 		/* Disable cursor if disabling crtc */
8919 		if (old_crtc_state->active && !new_crtc_state->active) {
8920 			struct dc_cursor_position position;
8921 
8922 			memset(&position, 0, sizeof(position));
8923 			mutex_lock(&dm->dc_lock);
8924 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8925 			mutex_unlock(&dm->dc_lock);
8926 		}
8927 
8928 		/* Copy all transient state flags into dc state */
8929 		if (dm_new_crtc_state->stream) {
8930 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8931 							    dm_new_crtc_state->stream);
8932 		}
8933 
		/*
		 * Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8938 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8939 
8940 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8941 
8942 			if (!dm_new_crtc_state->stream) {
8943 				/*
8944 				 * this could happen because of issues with
8945 				 * userspace notifications delivery.
8946 				 * In this case userspace tries to set mode on
8947 				 * display which is disconnected in fact.
8948 				 * dc_sink is NULL in this case on aconnector.
8949 				 * We expect reset mode will come soon.
8950 				 *
8951 				 * This can also happen when unplug is done
8952 				 * during resume sequence ended
8953 				 *
8954 				 * In this case, we want to pretend we still
8955 				 * have a sink to keep the pipe running so that
8956 				 * hw state is consistent with the sw state
8957 				 */
8958 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8959 						__func__, acrtc->base.base.id);
8960 				continue;
8961 			}
8962 
8963 			if (dm_old_crtc_state->stream)
8964 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8965 
8966 			pm_runtime_get_noresume(dev->dev);
8967 
8968 			acrtc->enabled = true;
8969 			acrtc->hw_mode = new_crtc_state->mode;
8970 			crtc->hwmode = new_crtc_state->mode;
8971 			mode_set_reset_required = true;
8972 		} else if (modereset_required(new_crtc_state)) {
8973 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8974 			/* i.e. reset mode */
8975 			if (dm_old_crtc_state->stream)
8976 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8977 
8978 			mode_set_reset_required = true;
8979 		}
8980 	} /* for_each_crtc_in_state() */
8981 
8982 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
8984 		if (mode_set_reset_required) {
8985 #if defined(CONFIG_DRM_AMD_DC_DCN)
8986 			flush_workqueue(dm->vblank_control_workqueue);
8987 #endif
8988 			amdgpu_dm_psr_disable_all(dm);
8989 		}
8990 
8991 		dm_enable_per_frame_crtc_master_sync(dc_state);
8992 		mutex_lock(&dm->dc_lock);
8993 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8994 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
8998 #endif
8999 		mutex_unlock(&dm->dc_lock);
9000 	}
9001 
9002 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9003 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9004 
9005 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9006 
9007 		if (dm_new_crtc_state->stream != NULL) {
9008 			const struct dc_stream_status *status =
9009 					dc_stream_get_status(dm_new_crtc_state->stream);
9010 
9011 			if (!status)
9012 				status = dc_stream_get_status_from_state(dc_state,
9013 									 dm_new_crtc_state->stream);
9014 			if (!status)
				DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
9016 			else
9017 				acrtc->otg_inst = status->primary_otg_inst;
9018 		}
9019 	}
9020 #ifdef CONFIG_DRM_AMD_DC_HDCP
9021 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9022 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9023 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9024 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9025 
9026 		new_crtc_state = NULL;
9027 
9028 		if (acrtc)
9029 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9030 
9031 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9032 
9033 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9034 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9035 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9036 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9037 			dm_new_con_state->update_hdcp = true;
9038 			continue;
9039 		}
9040 
9041 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9042 			hdcp_update_display(
9043 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9044 				new_con_state->hdcp_content_type,
9045 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9046 	}
9047 #endif
9048 
9049 	/* Handle connector state changes */
9050 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9051 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9052 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9053 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9054 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9055 		struct dc_stream_update stream_update;
9056 		struct dc_info_packet hdr_packet;
9057 		struct dc_stream_status *status = NULL;
9058 		bool abm_changed, hdr_changed, scaling_changed;
9059 
9060 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9061 		memset(&stream_update, 0, sizeof(stream_update));
9062 
9063 		if (acrtc) {
9064 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9065 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9066 		}
9067 
9068 		/* Skip any modesets/resets */
9069 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9070 			continue;
9071 
9072 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9073 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9074 
9075 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9076 							     dm_old_con_state);
9077 
9078 		abm_changed = dm_new_crtc_state->abm_level !=
9079 			      dm_old_crtc_state->abm_level;
9080 
9081 		hdr_changed =
9082 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9083 
9084 		if (!scaling_changed && !abm_changed && !hdr_changed)
9085 			continue;
9086 
9087 		stream_update.stream = dm_new_crtc_state->stream;
9088 		if (scaling_changed) {
9089 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9090 					dm_new_con_state, dm_new_crtc_state->stream);
9091 
9092 			stream_update.src = dm_new_crtc_state->stream->src;
9093 			stream_update.dst = dm_new_crtc_state->stream->dst;
9094 		}
9095 
9096 		if (abm_changed) {
9097 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9098 
9099 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9100 		}
9101 
9102 		if (hdr_changed) {
9103 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9104 			stream_update.hdr_static_metadata = &hdr_packet;
9105 		}
9106 
9107 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9108 
9109 		if (WARN_ON(!status))
9110 			continue;
9111 
9112 		WARN_ON(!status->plane_count);
9113 
9114 		/*
9115 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9116 		 * Here we create an empty update on each plane.
9117 		 * To fix this, DC should permit updating only stream properties.
9118 		 */
9119 		for (j = 0; j < status->plane_count; j++)
9120 			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_commit_updates_for_stream(dm->dc,
					     dummy_updates,
					     status->plane_count,
					     dm_new_crtc_state->stream,
					     &stream_update,
					     dc_state);
9130 		mutex_unlock(&dm->dc_lock);
9131 	}
9132 
9133 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9134 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9135 				      new_crtc_state, i) {
9136 		if (old_crtc_state->active && !new_crtc_state->active)
9137 			crtc_disable_count++;
9138 
9139 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9140 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9141 
9142 		/* For freesync config update on crtc state and params for irq */
9143 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9144 
9145 		/* Handle vrr on->off / off->on transitions */
9146 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9147 						dm_new_crtc_state);
9148 	}
9149 
9150 	/**
9151 	 * Enable interrupts for CRTCs that are newly enabled or went through
9152 	 * a modeset. It was intentionally deferred until after the front end
9153 	 * state was modified to wait until the OTG was on and so the IRQ
9154 	 * handlers didn't access stale or invalid state.
9155 	 */
9156 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9157 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9158 #ifdef CONFIG_DEBUG_FS
9159 		bool configure_crc = false;
9160 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9161 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9162 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9163 #endif
9164 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9165 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9166 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9167 #endif
9168 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9169 
9170 		if (new_crtc_state->active &&
9171 		    (!old_crtc_state->active ||
9172 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9173 			dc_stream_retain(dm_new_crtc_state->stream);
9174 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9175 			manage_dm_interrupts(adev, acrtc, true);
9176 
9177 #ifdef CONFIG_DEBUG_FS
9178 			/**
9179 			 * Frontend may have changed so reapply the CRC capture
9180 			 * settings for the stream.
9181 			 */
9182 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9183 
9184 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9185 				configure_crc = true;
9186 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9187 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9188 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9189 					acrtc->dm_irq_params.crc_window.update_win = true;
9190 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9191 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9192 					crc_rd_wrk->crtc = crtc;
9193 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9194 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9195 				}
9196 #endif
9197 			}
9198 
9199 			if (configure_crc)
9200 				if (amdgpu_dm_crtc_configure_crc_source(
9201 					crtc, dm_new_crtc_state, cur_crc_src))
9202 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9203 #endif
9204 		}
9205 	}
9206 
9207 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9208 		if (new_crtc_state->async_flip)
9209 			wait_for_vblank = false;
9210 
9211 	/* update planes when needed per crtc*/
9212 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9213 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9214 
9215 		if (dm_new_crtc_state->stream)
9216 			amdgpu_dm_commit_planes(state, dc_state, dev,
9217 						dm, crtc, wait_for_vblank);
9218 	}
9219 
9220 	/* Update audio instances for each connector. */
9221 	amdgpu_dm_commit_audio(dev, state);
9222 
9223 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9224 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9225 	/* restore the backlight level */
9226 	for (i = 0; i < dm->num_of_edps; i++) {
9227 		if (dm->backlight_dev[i] &&
9228 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9229 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9230 	}
9231 #endif
9232 	/*
9233 	 * send vblank event on all events not handled in flip and
9234 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9235 	 */
9236 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9237 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9238 
9239 		if (new_crtc_state->event)
9240 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9241 
9242 		new_crtc_state->event = NULL;
9243 	}
9244 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9245 
9246 	/* Signal HW programming completion */
9247 	drm_atomic_helper_commit_hw_done(state);
9248 
9249 	if (wait_for_vblank)
9250 		drm_atomic_helper_wait_for_flip_done(dev, state);
9251 
9252 	drm_atomic_helper_cleanup_planes(dev, state);
9253 
9254 	/* return the stolen vga memory back to VRAM */
9255 	if (!adev->mman.keep_stolen_vga_memory)
9256 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9257 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9258 
9259 	/*
9260 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9261 	 * so we can put the GPU into runtime suspend if we're not driving any
9262 	 * displays anymore
9263 	 */
9264 	for (i = 0; i < crtc_disable_count; i++)
9265 		pm_runtime_put_autosuspend(dev->dev);
9266 	pm_runtime_mark_last_busy(dev->dev);
9267 
9268 	if (dc_state_temp)
9269 		dc_release_state(dc_state_temp);
9270 }
9271 
9272 
9273 static int dm_force_atomic_commit(struct drm_connector *connector)
9274 {
9275 	int ret = 0;
9276 	struct drm_device *ddev = connector->dev;
9277 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9278 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9279 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9280 	struct drm_connector_state *conn_state;
9281 	struct drm_crtc_state *crtc_state;
9282 	struct drm_plane_state *plane_state;
9283 
9284 	if (!state)
9285 		return -ENOMEM;
9286 
9287 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9288 
9289 	/* Construct an atomic state to restore previous display setting */
9290 
9291 	/*
9292 	 * Attach connectors to drm_atomic_state
9293 	 */
9294 	conn_state = drm_atomic_get_connector_state(state, connector);
9295 
9296 	ret = PTR_ERR_OR_ZERO(conn_state);
9297 	if (ret)
9298 		goto out;
9299 
9300 	/* Attach crtc to drm_atomic_state*/
9301 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9302 
9303 	ret = PTR_ERR_OR_ZERO(crtc_state);
9304 	if (ret)
9305 		goto out;
9306 
9307 	/* force a restore */
9308 	crtc_state->mode_changed = true;
9309 
9310 	/* Attach plane to drm_atomic_state */
9311 	plane_state = drm_atomic_get_plane_state(state, plane);
9312 
9313 	ret = PTR_ERR_OR_ZERO(plane_state);
9314 	if (ret)
9315 		goto out;
9316 
9317 	/* Call commit internally with the state we just constructed */
9318 	ret = drm_atomic_commit(state);
9319 
9320 out:
9321 	drm_atomic_state_put(state);
9322 	if (ret)
9323 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9324 
9325 	return ret;
9326 }
9327 
9328 /*
 * This function handles all cases when a mode set does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
9332  */
9333 void dm_restore_drm_connector_state(struct drm_device *dev,
9334 				    struct drm_connector *connector)
9335 {
9336 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9337 	struct amdgpu_crtc *disconnected_acrtc;
9338 	struct dm_crtc_state *acrtc_state;
9339 
9340 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9341 		return;
9342 
9343 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9344 	if (!disconnected_acrtc)
9345 		return;
9346 
9347 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9348 	if (!acrtc_state->stream)
9349 		return;
9350 
9351 	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn on the display, so we do it here.
9355 	 */
9356 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9357 		dm_force_atomic_commit(&aconnector->base);
9358 }
9359 
9360 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
9363  */
9364 static int do_aquire_global_lock(struct drm_device *dev,
9365 				 struct drm_atomic_state *state)
9366 {
9367 	struct drm_crtc *crtc;
9368 	struct drm_crtc_commit *commit;
9369 	long ret;
9370 
9371 	/*
9372 	 * Adding all modeset locks to aquire_ctx will
9373 	 * ensure that when the framework release it the
9374 	 * extra locks we are locking here will get released to
9375 	 */
9376 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9377 	if (ret)
9378 		return ret;
9379 
9380 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
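		/*
		 * Take a reference on the first pending commit under the
		 * commit lock so it cannot be freed while we wait on its
		 * completions below.
		 */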
9381 		spin_lock(&crtc->commit_lock);
9382 		commit = list_first_entry_or_null(&crtc->commit_list,
9383 				struct drm_crtc_commit, commit_entry);
9384 		if (commit)
9385 			drm_crtc_commit_get(commit);
9386 		spin_unlock(&crtc->commit_lock);
9387 
9388 		if (!commit)
9389 			continue;
9390 
9391 		/*
9392 		 * Make sure all pending HW programming completed and
9393 		 * page flips done
9394 		 */
9395 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9396 
9397 		if (ret > 0)
9398 			ret = wait_for_completion_interruptible_timeout(
9399 					&commit->flip_done, 10*HZ);
9400 
9401 		if (ret == 0)
9402 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9403 				  "timed out\n", crtc->base.id, crtc->name);
9404 
9405 		drm_crtc_commit_put(commit);
9406 	}
9407 
9408 	return ret < 0 ? ret : 0;
9409 }
9410 
9411 static void get_freesync_config_for_crtc(
9412 	struct dm_crtc_state *new_crtc_state,
9413 	struct dm_connector_state *new_con_state)
9414 {
9415 	struct mod_freesync_config config = {0};
9416 	struct amdgpu_dm_connector *aconnector =
9417 			to_amdgpu_dm_connector(new_con_state->base.connector);
9418 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9419 	int vrefresh = drm_mode_vrefresh(mode);
9420 	bool fs_vid_mode = false;
9421 
9422 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9423 					vrefresh >= aconnector->min_vfreq &&
9424 					vrefresh <= aconnector->max_vfreq;
9425 
9426 	if (new_crtc_state->vrr_supported) {
9427 		new_crtc_state->stream->ignore_msa_timing_param = true;
9428 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9429 
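		/* aconnector->min/max_vfreq are in Hz; mod_freesync wants uhz. */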
9430 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9431 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9432 		config.vsif_supported = true;
9433 		config.btr = true;
9434 
9435 		if (fs_vid_mode) {
9436 			config.state = VRR_STATE_ACTIVE_FIXED;
9437 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9438 			goto out;
9439 		} else if (new_crtc_state->base.vrr_enabled) {
9440 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9441 		} else {
9442 			config.state = VRR_STATE_INACTIVE;
9443 		}
9444 	}
9445 out:
9446 	new_crtc_state->freesync_config = config;
9447 }
9448 
9449 static void reset_freesync_config_for_crtc(
9450 	struct dm_crtc_state *new_crtc_state)
9451 {
9452 	new_crtc_state->vrr_supported = false;
9453 
9454 	memset(&new_crtc_state->vrr_infopacket, 0,
9455 	       sizeof(new_crtc_state->vrr_infopacket));
9456 }
9457 
9458 static bool
9459 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9460 				 struct drm_crtc_state *new_crtc_state)
9461 {
9462 	struct drm_display_mode old_mode, new_mode;
9463 
9464 	if (!old_crtc_state || !new_crtc_state)
9465 		return false;
9466 
9467 	old_mode = old_crtc_state->mode;
9468 	new_mode = new_crtc_state->mode;
9469 
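	/*
	 * The timing is considered unchanged for freesync purposes when every
	 * horizontal parameter and the vsync pulse width match, and only
	 * vtotal / vsync_start / vsync_end moved - i.e. only the vertical
	 * front porch differs, which VRR can absorb without a full modeset.
	 */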
9470 	if (old_mode.clock       == new_mode.clock &&
9471 	    old_mode.hdisplay    == new_mode.hdisplay &&
9472 	    old_mode.vdisplay    == new_mode.vdisplay &&
9473 	    old_mode.htotal      == new_mode.htotal &&
9474 	    old_mode.vtotal      != new_mode.vtotal &&
9475 	    old_mode.hsync_start == new_mode.hsync_start &&
9476 	    old_mode.vsync_start != new_mode.vsync_start &&
9477 	    old_mode.hsync_end   == new_mode.hsync_end &&
9478 	    old_mode.vsync_end   != new_mode.vsync_end &&
9479 	    old_mode.hskew       == new_mode.hskew &&
9480 	    old_mode.vscan       == new_mode.vscan &&
9481 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9482 	    (new_mode.vsync_end - new_mode.vsync_start))
9483 		return true;
9484 
9485 	return false;
9486 }
9487 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9489 	uint64_t num, den, res;
9490 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9491 
9492 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9493 
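	/*
	 * mode.clock is in kHz: multiplying by 1000 gives the pixel clock in
	 * Hz, and a further 1000000 scales it to uhz; dividing by the pixels
	 * per frame (htotal * vtotal) then yields the fixed refresh rate in
	 * uhz.
	 */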
9494 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9495 	den = (unsigned long long)new_crtc_state->mode.htotal *
9496 	      (unsigned long long)new_crtc_state->mode.vtotal;
9497 
9498 	res = div_u64(num, den);
9499 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9500 }
9501 
9502 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9503 				struct drm_atomic_state *state,
9504 				struct drm_crtc *crtc,
9505 				struct drm_crtc_state *old_crtc_state,
9506 				struct drm_crtc_state *new_crtc_state,
9507 				bool enable,
9508 				bool *lock_and_validation_needed)
9509 {
9510 	struct dm_atomic_state *dm_state = NULL;
9511 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9512 	struct dc_stream_state *new_stream;
9513 	int ret = 0;
9514 
9515 	/*
9516 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9517 	 * update changed items
9518 	 */
9519 	struct amdgpu_crtc *acrtc = NULL;
9520 	struct amdgpu_dm_connector *aconnector = NULL;
9521 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9522 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9523 
9524 	new_stream = NULL;
9525 
9526 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9527 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9528 	acrtc = to_amdgpu_crtc(crtc);
9529 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9530 
9531 	/* TODO This hack should go away */
9532 	if (aconnector && enable) {
9533 		/* Make sure fake sink is created in plug-in scenario */
9534 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9535 							    &aconnector->base);
9536 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9537 							    &aconnector->base);
9538 
9539 		if (IS_ERR(drm_new_conn_state)) {
9540 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9541 			goto fail;
9542 		}
9543 
9544 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9545 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9546 
9547 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9548 			goto skip_modeset;
9549 
9550 		new_stream = create_validate_stream_for_sink(aconnector,
9551 							     &new_crtc_state->mode,
9552 							     dm_new_conn_state,
9553 							     dm_old_crtc_state->stream);
9554 
9555 		/*
9556 		 * we can have no stream on ACTION_SET if a display
9557 		 * was disconnected during S3, in this case it is not an
9558 		 * error, the OS will be updated after detection, and
9559 		 * will do the right thing on next atomic commit
9560 		 */
9561 
9562 		if (!new_stream) {
9563 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9564 					__func__, acrtc->base.base.id);
9565 			ret = -ENOMEM;
9566 			goto fail;
9567 		}
9568 
9569 		/*
9570 		 * TODO: Check VSDB bits to decide whether this should
9571 		 * be enabled or not.
9572 		 */
9573 		new_stream->triggered_crtc_reset.enabled =
9574 			dm->force_timing_sync;
9575 
9576 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9577 
9578 		ret = fill_hdr_info_packet(drm_new_conn_state,
9579 					   &new_stream->hdr_static_metadata);
9580 		if (ret)
9581 			goto fail;
9582 
9583 		/*
9584 		 * If we already removed the old stream from the context
9585 		 * (and set the new stream to NULL) then we can't reuse
9586 		 * the old stream even if the stream and scaling are unchanged.
		 * We'd hit the BUG_ON below and get a black screen.
9588 		 *
9589 		 * TODO: Refactor this function to allow this check to work
9590 		 * in all conditions.
9591 		 */
9592 		if (amdgpu_freesync_vid_mode &&
9593 		    dm_new_crtc_state->stream &&
9594 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9595 			goto skip_modeset;
9596 
9597 		if (dm_new_crtc_state->stream &&
9598 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9599 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9600 			new_crtc_state->mode_changed = false;
9601 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9602 					 new_crtc_state->mode_changed);
9603 		}
9604 	}
9605 
9606 	/* mode_changed flag may get updated above, need to check again */
9607 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9608 		goto skip_modeset;
9609 
9610 	DRM_DEBUG_ATOMIC(
9611 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9612 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9613 		"connectors_changed:%d\n",
9614 		acrtc->crtc_id,
9615 		new_crtc_state->enable,
9616 		new_crtc_state->active,
9617 		new_crtc_state->planes_changed,
9618 		new_crtc_state->mode_changed,
9619 		new_crtc_state->active_changed,
9620 		new_crtc_state->connectors_changed);
9621 
9622 	/* Remove stream for any changed/disabled CRTC */
9623 	if (!enable) {
9624 
9625 		if (!dm_old_crtc_state->stream)
9626 			goto skip_modeset;
9627 
9628 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9629 		    is_timing_unchanged_for_freesync(new_crtc_state,
9630 						     old_crtc_state)) {
9631 			new_crtc_state->mode_changed = false;
9632 			DRM_DEBUG_DRIVER(
9633 				"Mode change not required for front porch change, "
9634 				"setting mode_changed to %d",
9635 				new_crtc_state->mode_changed);
9636 
9637 			set_freesync_fixed_config(dm_new_crtc_state);
9638 
9639 			goto skip_modeset;
9640 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9641 			   is_freesync_video_mode(&new_crtc_state->mode,
9642 						  aconnector)) {
9643 			struct drm_display_mode *high_mode;
9644 
9645 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
9649 		}
9650 
9651 		ret = dm_atomic_get_state(state, &dm_state);
9652 		if (ret)
9653 			goto fail;
9654 
9655 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9656 				crtc->base.id);
9657 
9658 		/* i.e. reset mode */
9659 		if (dc_remove_stream_from_ctx(
9660 				dm->dc,
9661 				dm_state->context,
9662 				dm_old_crtc_state->stream) != DC_OK) {
9663 			ret = -EINVAL;
9664 			goto fail;
9665 		}
9666 
9667 		dc_stream_release(dm_old_crtc_state->stream);
9668 		dm_new_crtc_state->stream = NULL;
9669 
9670 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9671 
9672 		*lock_and_validation_needed = true;
9673 
9674 	} else {/* Add stream for any updated/enabled CRTC */
9675 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of this.
9679 		 */
9680 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9681 			goto skip_modeset;
9682 
9683 		if (modereset_required(new_crtc_state))
9684 			goto skip_modeset;
9685 
9686 		if (modeset_required(new_crtc_state, new_stream,
9687 				     dm_old_crtc_state->stream)) {
9688 
9689 			WARN_ON(dm_new_crtc_state->stream);
9690 
9691 			ret = dm_atomic_get_state(state, &dm_state);
9692 			if (ret)
9693 				goto fail;
9694 
9695 			dm_new_crtc_state->stream = new_stream;
9696 
9697 			dc_stream_retain(new_stream);
9698 
9699 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9700 					 crtc->base.id);
9701 
9702 			if (dc_add_stream_to_ctx(
9703 					dm->dc,
9704 					dm_state->context,
9705 					dm_new_crtc_state->stream) != DC_OK) {
9706 				ret = -EINVAL;
9707 				goto fail;
9708 			}
9709 
9710 			*lock_and_validation_needed = true;
9711 		}
9712 	}
9713 
9714 skip_modeset:
9715 	/* Release extra reference */
9716 	if (new_stream)
		dc_stream_release(new_stream);
9718 
9719 	/*
9720 	 * We want to do dc stream updates that do not require a
9721 	 * full modeset below.
9722 	 */
9723 	if (!(enable && aconnector && new_crtc_state->active))
9724 		return 0;
9725 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already on the context),
	 * 2. the CRTC has a valid connector attached, and
	 * 3. the CRTC is currently active and enabled.
9731 	 * => The dc stream state currently exists.
9732 	 */
9733 	BUG_ON(dm_new_crtc_state->stream == NULL);
9734 
9735 	/* Scaling or underscan settings */
9736 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9737 				drm_atomic_crtc_needs_modeset(new_crtc_state))
9738 		update_stream_scaling_settings(
9739 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9740 
9741 	/* ABM settings */
9742 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9743 
9744 	/*
9745 	 * Color management settings. We also update color properties
9746 	 * when a modeset is needed, to ensure it gets reprogrammed.
9747 	 */
9748 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9749 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9750 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9751 		if (ret)
9752 			goto fail;
9753 	}
9754 
9755 	/* Update Freesync settings. */
9756 	get_freesync_config_for_crtc(dm_new_crtc_state,
9757 				     dm_new_conn_state);
9758 
9759 	return ret;
9760 
9761 fail:
9762 	if (new_stream)
9763 		dc_stream_release(new_stream);
9764 	return ret;
9765 }
9766 
9767 static bool should_reset_plane(struct drm_atomic_state *state,
9768 			       struct drm_plane *plane,
9769 			       struct drm_plane_state *old_plane_state,
9770 			       struct drm_plane_state *new_plane_state)
9771 {
9772 	struct drm_plane *other;
9773 	struct drm_plane_state *old_other_state, *new_other_state;
9774 	struct drm_crtc_state *new_crtc_state;
9775 	int i;
9776 
9777 	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
9781 	 */
9782 	if (state->allow_modeset)
9783 		return true;
9784 
9785 	/* Exit early if we know that we're adding or removing the plane. */
9786 	if (old_plane_state->crtc != new_plane_state->crtc)
9787 		return true;
9788 
9789 	/* old crtc == new_crtc == NULL, plane not in context. */
9790 	if (!new_plane_state->crtc)
9791 		return false;
9792 
9793 	new_crtc_state =
9794 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9795 
9796 	if (!new_crtc_state)
9797 		return true;
9798 
9799 	/* CRTC Degamma changes currently require us to recreate planes. */
9800 	if (new_crtc_state->color_mgmt_changed)
9801 		return true;
9802 
9803 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9804 		return true;
9805 
9806 	/*
9807 	 * If there are any new primary or overlay planes being added or
9808 	 * removed then the z-order can potentially change. To ensure
9809 	 * correct z-order and pipe acquisition the current DC architecture
9810 	 * requires us to remove and recreate all existing planes.
9811 	 *
9812 	 * TODO: Come up with a more elegant solution for this.
9813 	 */
9814 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
9817 			continue;
9818 
9819 		if (old_other_state->crtc != new_plane_state->crtc &&
9820 		    new_other_state->crtc != new_plane_state->crtc)
9821 			continue;
9822 
9823 		if (old_other_state->crtc != new_other_state->crtc)
9824 			return true;
9825 
9826 		/* Src/dst size and scaling updates. */
9827 		if (old_other_state->src_w != new_other_state->src_w ||
9828 		    old_other_state->src_h != new_other_state->src_h ||
9829 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9830 		    old_other_state->crtc_h != new_other_state->crtc_h)
9831 			return true;
9832 
9833 		/* Rotation / mirroring updates. */
9834 		if (old_other_state->rotation != new_other_state->rotation)
9835 			return true;
9836 
9837 		/* Blending updates. */
9838 		if (old_other_state->pixel_blend_mode !=
9839 		    new_other_state->pixel_blend_mode)
9840 			return true;
9841 
9842 		/* Alpha updates. */
9843 		if (old_other_state->alpha != new_other_state->alpha)
9844 			return true;
9845 
9846 		/* Colorspace changes. */
9847 		if (old_other_state->color_range != new_other_state->color_range ||
9848 		    old_other_state->color_encoding != new_other_state->color_encoding)
9849 			return true;
9850 
9851 		/* Framebuffer checks fall at the end. */
9852 		if (!old_other_state->fb || !new_other_state->fb)
9853 			continue;
9854 
9855 		/* Pixel format changes can require bandwidth updates. */
9856 		if (old_other_state->fb->format != new_other_state->fb->format)
9857 			return true;
9858 
9859 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9860 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9861 
9862 		/* Tiling and DCC changes also require bandwidth updates. */
9863 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9864 		    old_afb->base.modifier != new_afb->base.modifier)
9865 			return true;
9866 	}
9867 
9868 	return false;
9869 }
9870 
9871 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9872 			      struct drm_plane_state *new_plane_state,
9873 			      struct drm_framebuffer *fb)
9874 {
9875 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9876 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9877 	unsigned int pitch;
9878 	bool linear;
9879 
9880 	if (fb->width > new_acrtc->max_cursor_width ||
9881 	    fb->height > new_acrtc->max_cursor_height) {
9882 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 fb->width, fb->height);
9885 		return -EINVAL;
9886 	}
9887 	if (new_plane_state->src_w != fb->width << 16 ||
9888 	    new_plane_state->src_h != fb->height << 16) {
9889 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9890 		return -EINVAL;
9891 	}
9892 
9893 	/* Pitch in pixels */
9894 	pitch = fb->pitches[0] / fb->format->cpp[0];
9895 
9896 	if (fb->width != pitch) {
9897 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9898 				 fb->width, pitch);
9899 		return -EINVAL;
9900 	}
9901 
9902 	switch (pitch) {
9903 	case 64:
9904 	case 128:
9905 	case 256:
9906 		/* FB pitch is supported by cursor plane */
9907 		break;
9908 	default:
9909 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9910 		return -EINVAL;
9911 	}
9912 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9915 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9916 		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9920 		} else {
9921 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9922 		}
9923 		if (!linear) {
9924 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9925 			return -EINVAL;
9926 		}
9927 	}
9928 
9929 	return 0;
9930 }
9931 
9932 static int dm_update_plane_state(struct dc *dc,
9933 				 struct drm_atomic_state *state,
9934 				 struct drm_plane *plane,
9935 				 struct drm_plane_state *old_plane_state,
9936 				 struct drm_plane_state *new_plane_state,
9937 				 bool enable,
9938 				 bool *lock_and_validation_needed)
9939 {
9941 	struct dm_atomic_state *dm_state = NULL;
9942 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9943 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9944 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9945 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9946 	struct amdgpu_crtc *new_acrtc;
9947 	bool needs_reset;
9948 	int ret = 0;
9951 	new_plane_crtc = new_plane_state->crtc;
9952 	old_plane_crtc = old_plane_state->crtc;
9953 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9954 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9955 
9956 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
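		/*
		 * Cursor planes are not added to the DC context; they are
		 * programmed separately at commit time, so only the position
		 * and framebuffer need validating here.
		 */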
9957 		if (!enable || !new_plane_crtc ||
9958 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9959 			return 0;
9960 
9961 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9962 
9963 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9964 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9965 			return -EINVAL;
9966 		}
9967 
9968 		if (new_plane_state->fb) {
9969 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9970 						 new_plane_state->fb);
9971 			if (ret)
9972 				return ret;
9973 		}
9974 
9975 		return 0;
9976 	}
9977 
9978 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9979 					 new_plane_state);
9980 
9981 	/* Remove any changed/removed planes */
9982 	if (!enable) {
9983 		if (!needs_reset)
9984 			return 0;
9985 
9986 		if (!old_plane_crtc)
9987 			return 0;
9988 
9989 		old_crtc_state = drm_atomic_get_old_crtc_state(
9990 				state, old_plane_crtc);
9991 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9992 
9993 		if (!dm_old_crtc_state->stream)
9994 			return 0;
9995 
9996 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9997 				plane->base.id, old_plane_crtc->base.id);
9998 
9999 		ret = dm_atomic_get_state(state, &dm_state);
10000 		if (ret)
10001 			return ret;
10002 
		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;
10015 
10016 		*lock_and_validation_needed = true;
10017 
10018 	} else { /* Add new planes */
10019 		struct dc_plane_state *dc_new_plane_state;
10020 
10021 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10022 			return 0;
10023 
10024 		if (!new_plane_crtc)
10025 			return 0;
10026 
10027 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10028 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10029 
10030 		if (!dm_new_crtc_state->stream)
10031 			return 0;
10032 
10033 		if (!needs_reset)
10034 			return 0;
10035 
10036 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10037 		if (ret)
10038 			return ret;
10039 
10040 		WARN_ON(dm_new_plane_state->dc_state);
10041 
10042 		dc_new_plane_state = dc_create_plane_state(dc);
10043 		if (!dc_new_plane_state)
10044 			return -ENOMEM;
10045 
10046 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10047 				 plane->base.id, new_plane_crtc->base.id);
10048 
10049 		ret = fill_dc_plane_attributes(
10050 			drm_to_adev(new_plane_crtc->dev),
10051 			dc_new_plane_state,
10052 			new_plane_state,
10053 			new_crtc_state);
10054 		if (ret) {
10055 			dc_plane_state_release(dc_new_plane_state);
10056 			return ret;
10057 		}
10058 
10059 		ret = dm_atomic_get_state(state, &dm_state);
10060 		if (ret) {
10061 			dc_plane_state_release(dc_new_plane_state);
10062 			return ret;
10063 		}
10064 
10065 		/*
10066 		 * Any atomic check errors that occur after this will
10067 		 * not need a release. The plane state will be attached
10068 		 * to the stream, and therefore part of the atomic
10069 		 * state. It'll be released when the atomic state is
10070 		 * cleaned.
10071 		 */
10072 		if (!dc_add_plane_to_context(
10073 				dc,
10074 				dm_new_crtc_state->stream,
10075 				dc_new_plane_state,
10076 				dm_state->context)) {
10077 
10078 			dc_plane_state_release(dc_new_plane_state);
10079 			return -EINVAL;
10080 		}
10081 
10082 		dm_new_plane_state->dc_state = dc_new_plane_state;
10083 
10084 		/* Tell DC to do a full surface update every time there
10085 		 * is a plane change. Inefficient, but works for now.
10086 		 */
10087 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10088 
10089 		*lock_and_validation_needed = true;
	}

	return ret;
10094 }
10095 
10096 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10097 				struct drm_crtc *crtc,
10098 				struct drm_crtc_state *new_crtc_state)
10099 {
10100 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10101 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10102 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the primary plane's.
	 */
10107 
10108 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10109 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10110 	if (!new_cursor_state || !new_primary_state ||
10111 	    !new_cursor_state->fb || !new_primary_state->fb) {
10112 		return 0;
10113 	}
10114 
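	/*
	 * Plane src coordinates are 16.16 fixed point, so shifting right by
	 * 16 yields the integer source size. Scale factors are compared as
	 * integer ratios scaled by 1000 to avoid floating point.
	 */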
10115 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10116 			 (new_cursor_state->src_w >> 16);
10117 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10118 			 (new_cursor_state->src_h >> 16);
10119 
10120 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10121 			 (new_primary_state->src_w >> 16);
10122 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10123 			 (new_primary_state->src_h >> 16);
10124 
10125 	if (cursor_scale_w != primary_scale_w ||
10126 	    cursor_scale_h != primary_scale_h) {
10127 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10128 		return -EINVAL;
10129 	}
10130 
10131 	return 0;
10132 }
10133 
10134 #if defined(CONFIG_DRM_AMD_DC_DCN)
10135 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10136 {
10137 	struct drm_connector *connector;
10138 	struct drm_connector_state *conn_state;
10139 	struct amdgpu_dm_connector *aconnector = NULL;
10140 	int i;
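
	/*
	 * Find the MST connector, if any, that drives this CRTC. A DSC
	 * recompute for one stream can change the bandwidth available to
	 * every stream sharing the same MST link, so all affected CRTCs
	 * have to be added to the state.
	 */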
10141 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10142 		if (conn_state->crtc != crtc)
10143 			continue;
10144 
10145 		aconnector = to_amdgpu_dm_connector(connector);
10146 		if (!aconnector->port || !aconnector->mst_port)
10147 			aconnector = NULL;
10148 		else
10149 			break;
10150 	}
10151 
10152 	if (!aconnector)
10153 		return 0;
10154 
10155 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10156 }
10157 #endif
10158 
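/*
 * The hardware cursor is composited on the top-most enabled plane. If an
 * overlay only partially covered the primary plane, the cursor could be
 * drawn incorrectly over the uncovered region, so require any enabled
 * overlay to fully cover the primary plane.
 */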
10159 static int validate_overlay(struct drm_atomic_state *state)
10160 {
10161 	int i;
10162 	struct drm_plane *plane;
10163 	struct drm_plane_state *new_plane_state;
10164 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10165 
	/* Find the overlay plane state; bail out if it is being disabled */
10167 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10168 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10169 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10170 				return 0;
10171 
10172 			overlay_state = new_plane_state;
10173 			continue;
10174 		}
10175 	}
10176 
10177 	/* check if we're making changes to the overlay plane */
10178 	if (!overlay_state)
10179 		return 0;
10180 
10181 	/* check if overlay plane is enabled */
10182 	if (!overlay_state->crtc)
10183 		return 0;
10184 
10185 	/* find the primary plane for the CRTC that the overlay is enabled on */
10186 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10187 	if (IS_ERR(primary_state))
10188 		return PTR_ERR(primary_state);
10189 
10190 	/* check if primary plane is enabled */
10191 	if (!primary_state->crtc)
10192 		return 0;
10193 
10194 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10195 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10196 	    primary_state->crtc_y < overlay_state->crtc_y ||
10197 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10198 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10199 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10200 		return -EINVAL;
10201 	}
10202 
10203 	return 0;
10204 }
10205 
10206 /**
10207  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10208  * @dev: The DRM device
10209  * @state: The atomic state to commit
10210  *
10211  * Validate that the given atomic state is programmable by DC into hardware.
10212  * This involves constructing a &struct dc_state reflecting the new hardware
10213  * state we wish to commit, then querying DC to see if it is programmable. It's
10214  * important not to modify the existing DC state. Otherwise, atomic_check
10215  * may unexpectedly commit hardware changes.
10216  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees
 * that any such full-update commit will wait for completion of any
 * outstanding flips using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, even
 * when that might not seem necessary. This is because DC stream creation
 * requires the DC sink, which is tied to the DRM connector state. Cleaning
 * this up should be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
10229  */
10230 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10231 				  struct drm_atomic_state *state)
10232 {
10233 	struct amdgpu_device *adev = drm_to_adev(dev);
10234 	struct dm_atomic_state *dm_state = NULL;
10235 	struct dc *dc = adev->dm.dc;
10236 	struct drm_connector *connector;
10237 	struct drm_connector_state *old_con_state, *new_con_state;
10238 	struct drm_crtc *crtc;
10239 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10240 	struct drm_plane *plane;
10241 	struct drm_plane_state *old_plane_state, *new_plane_state;
10242 	enum dc_status status;
10243 	int ret, i;
10244 	bool lock_and_validation_needed = false;
10245 	struct dm_crtc_state *dm_old_crtc_state;
10246 
10247 	trace_amdgpu_dm_atomic_check_begin(state);
10248 
10249 	ret = drm_atomic_helper_check_modeset(dev, state);
10250 	if (ret)
10251 		goto fail;
10252 
10253 	/* Check connector changes */
10254 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10255 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10256 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10257 
		/* Skip connectors that are disabled or already part of a modeset. */
10259 		if (!old_con_state->crtc && !new_con_state->crtc)
10260 			continue;
10261 
10262 		if (!new_con_state->crtc)
10263 			continue;
10264 
10265 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10266 		if (IS_ERR(new_crtc_state)) {
10267 			ret = PTR_ERR(new_crtc_state);
10268 			goto fail;
10269 		}
10270 
10271 		if (dm_old_con_state->abm_level !=
10272 		    dm_new_con_state->abm_level)
10273 			new_crtc_state->connectors_changed = true;
10274 	}
10275 
10276 #if defined(CONFIG_DRM_AMD_DC_DCN)
10277 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10278 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10279 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10280 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10281 				if (ret)
10282 					goto fail;
10283 			}
10284 		}
10285 	}
10286 #endif
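	/*
	 * Pull affected connectors and planes into the state for every CRTC
	 * whose mode, color management, VRR or forced-DSC state changed, so
	 * that the checks below operate on the complete set of objects.
	 */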
10287 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10288 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10289 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10294 			continue;
10295 
10296 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10297 		if (ret)
10298 			goto fail;
10299 
10300 		if (!new_crtc_state->enable)
10301 			continue;
10302 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
10306 
10307 		ret = drm_atomic_add_affected_planes(state, crtc);
10308 		if (ret)
10309 			goto fail;
10310 
10311 		if (dm_old_crtc_state->dsc_force_changed)
10312 			new_crtc_state->mode_changed = true;
10313 	}
10314 
10315 	/*
10316 	 * Add all primary and overlay planes on the CRTC to the state
10317 	 * whenever a plane is enabled to maintain correct z-ordering
10318 	 * and to enable fast surface updates.
10319 	 */
10320 	drm_for_each_crtc(crtc, dev) {
10321 		bool modified = false;
10322 
10323 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10324 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10325 				continue;
10326 
10327 			if (new_plane_state->crtc == crtc ||
10328 			    old_plane_state->crtc == crtc) {
10329 				modified = true;
10330 				break;
10331 			}
10332 		}
10333 
10334 		if (!modified)
10335 			continue;
10336 
10337 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10338 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10339 				continue;
10340 
10341 			new_plane_state =
10342 				drm_atomic_get_plane_state(state, plane);
10343 
10344 			if (IS_ERR(new_plane_state)) {
10345 				ret = PTR_ERR(new_plane_state);
10346 				goto fail;
10347 			}
10348 		}
10349 	}
10350 
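	/*
	 * Removals must be processed before additions: planes and CRTCs are
	 * disabled first so the DC resources they hold can be reclaimed
	 * before new planes and streams are enabled below.
	 */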
	/* Remove existing planes if they are modified */
10352 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10353 		ret = dm_update_plane_state(dc, state, plane,
10354 					    old_plane_state,
10355 					    new_plane_state,
10356 					    false,
10357 					    &lock_and_validation_needed);
10358 		if (ret)
10359 			goto fail;
10360 	}
10361 
10362 	/* Disable all crtcs which require disable */
10363 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10364 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10365 					   old_crtc_state,
10366 					   new_crtc_state,
10367 					   false,
10368 					   &lock_and_validation_needed);
10369 		if (ret)
10370 			goto fail;
10371 	}
10372 
10373 	/* Enable all crtcs which require enable */
10374 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10375 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10376 					   old_crtc_state,
10377 					   new_crtc_state,
10378 					   true,
10379 					   &lock_and_validation_needed);
10380 		if (ret)
10381 			goto fail;
10382 	}
10383 
10384 	ret = validate_overlay(state);
10385 	if (ret)
10386 		goto fail;
10387 
10388 	/* Add new/modified planes */
10389 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10390 		ret = dm_update_plane_state(dc, state, plane,
10391 					    old_plane_state,
10392 					    new_plane_state,
10393 					    true,
10394 					    &lock_and_validation_needed);
10395 		if (ret)
10396 			goto fail;
10397 	}
10398 
10399 	/* Run this here since we want to validate the streams we created */
10400 	ret = drm_atomic_helper_check_planes(dev, state);
10401 	if (ret)
10402 		goto fail;
10403 
10404 	/* Check cursor planes scaling */
10405 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10406 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10407 		if (ret)
10408 			goto fail;
10409 	}
10410 
10411 	if (state->legacy_cursor_update) {
10412 		/*
10413 		 * This is a fast cursor update coming from the plane update
10414 		 * helper, check if it can be done asynchronously for better
10415 		 * performance.
10416 		 */
10417 		state->async_update =
10418 			!drm_atomic_helper_async_check(dev, state);
10419 
10420 		/*
10421 		 * Skip the remaining global validation if this is an async
10422 		 * update. Cursor updates can be done without affecting
10423 		 * state or bandwidth calcs and this avoids the performance
10424 		 * penalty of locking the private state object and
10425 		 * allocating a new dc_state.
10426 		 */
10427 		if (state->async_update)
10428 			return 0;
10429 	}
10430 
	/* Check scaling and underscan changes */
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
10436 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10437 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10438 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10439 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10440 
10441 		/* Skip any modesets/resets */
10442 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10443 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10444 			continue;
10445 
		/* Skip anything that isn't a scaling or underscan change */
10447 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10448 			continue;
10449 
10450 		lock_and_validation_needed = true;
10451 	}
10452 
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
10463 	 *
10464 	 * TODO: Remove this stall and drop DM state private objects.
10465 	 */
10466 	if (lock_and_validation_needed) {
10467 		ret = dm_atomic_get_state(state, &dm_state);
10468 		if (ret)
10469 			goto fail;
10470 
10471 		ret = do_aquire_global_lock(dev, state);
10472 		if (ret)
10473 			goto fail;
10474 
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10478 
10479 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10480 		if (ret)
10481 			goto fail;
10482 #endif
10483 
10484 		/*
10485 		 * Perform validation of MST topology in the state:
10486 		 * We need to perform MST atomic check before calling
10487 		 * dc_validate_global_state(), or there is a chance
10488 		 * to get stuck in an infinite loop and hang eventually.
10489 		 */
10490 		ret = drm_dp_mst_atomic_check(state);
10491 		if (ret)
10492 			goto fail;
10493 		status = dc_validate_global_state(dc, dm_state->context, false);
10494 		if (status != DC_OK) {
10495 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10496 				       dc_status_to_str(status), status);
10497 			ret = -EINVAL;
10498 			goto fail;
10499 		}
10500 	} else {
10501 		/*
10502 		 * The commit is a fast update. Fast updates shouldn't change
10503 		 * the DC context, affect global validation, and can have their
10504 		 * commit work done in parallel with other commits not touching
10505 		 * the same resource. If we have a new DC context as part of
10506 		 * the DM atomic state from validation we need to free it and
10507 		 * retain the existing one instead.
10508 		 *
10509 		 * Furthermore, since the DM atomic state only contains the DC
10510 		 * context and can safely be annulled, we can free the state
10511 		 * and clear the associated private object now to free
10512 		 * some memory and avoid a possible use-after-free later.
10513 		 */
10514 
10515 		for (i = 0; i < state->num_private_objs; i++) {
10516 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10517 
10518 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10520 
10521 				dm_atomic_destroy_state(obj,
10522 						state->private_objs[i].state);
10523 
10524 				/* If i is not at the end of the array then the
10525 				 * last element needs to be moved to where i was
10526 				 * before the array can safely be truncated.
10527 				 */
10528 				if (i != j)
10529 					state->private_objs[i] =
10530 						state->private_objs[j];
10531 
10532 				state->private_objs[j].ptr = NULL;
10533 				state->private_objs[j].state = NULL;
10534 				state->private_objs[j].old_state = NULL;
10535 				state->private_objs[j].new_state = NULL;
10536 
10537 				state->num_private_objs = j;
10538 				break;
10539 			}
10540 		}
10541 	}
10542 
10543 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10545 		struct dm_crtc_state *dm_new_crtc_state =
10546 			to_dm_crtc_state(new_crtc_state);
10547 
10548 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10549 							 UPDATE_TYPE_FULL :
10550 							 UPDATE_TYPE_FAST;
10551 	}
10552 
10553 	/* Must be success */
10554 	WARN_ON(ret);
10555 
10556 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10557 
10558 	return ret;
10559 
10560 fail:
10561 	if (ret == -EDEADLK)
10562 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10563 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10564 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10565 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10567 
10568 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10569 
10570 	return ret;
10571 }
10572 
10573 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10574 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10575 {
10576 	uint8_t dpcd_data;
10577 	bool capable = false;
10578 
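	/*
	 * A sink that sets MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT
	 * can follow source-driven timing changes without relying on the MSA
	 * timing parameters, which is a prerequisite for variable refresh.
	 */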
10579 	if (amdgpu_dm_connector->dc_link &&
10580 		dm_helpers_dp_read_dpcd(
10581 				NULL,
10582 				amdgpu_dm_connector->dc_link,
10583 				DP_DOWN_STREAM_PORT_COUNT,
10584 				&dpcd_data,
10585 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10587 	}
10588 
10589 	return capable;
10590 }
10591 
10592 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10593 		unsigned int offset,
10594 		unsigned int total_length,
10595 		uint8_t *data,
10596 		unsigned int length,
10597 		struct amdgpu_hdmi_vsdb_info *vsdb)
10598 {
10599 	bool res;
10600 	union dmub_rb_cmd cmd;
10601 	struct dmub_cmd_send_edid_cea *input;
10602 	struct dmub_cmd_edid_cea_output *output;
10603 
10604 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10605 		return false;
10606 
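	/*
	 * Each command carries at most DMUB_EDID_CEA_DATA_CHUNK_BYTES of the
	 * CEA block; offset and total_length let the firmware reassemble the
	 * full extension from consecutive chunks.
	 */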
10607 	memset(&cmd, 0, sizeof(cmd));
10608 
10609 	input = &cmd.edid_cea.data.input;
10610 
10611 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10612 	cmd.edid_cea.header.sub_type = 0;
10613 	cmd.edid_cea.header.payload_bytes =
10614 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10615 	input->offset = offset;
10616 	input->length = length;
10617 	input->total_length = total_length;
10618 	memcpy(input->payload, data, length);
10619 
10620 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10621 	if (!res) {
10622 		DRM_ERROR("EDID CEA parser failed\n");
10623 		return false;
10624 	}
10625 
10626 	output = &cmd.edid_cea.data.output;
10627 
10628 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10629 		if (!output->ack.success) {
10630 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10631 					output->ack.offset);
10632 		}
10633 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10634 		if (!output->amd_vsdb.vsdb_found)
10635 			return false;
10636 
10637 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10638 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10639 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10640 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10641 	} else {
10642 		DRM_WARN("Unknown EDID CEA parser results\n");
10643 		return false;
10644 	}
10645 
10646 	return true;
10647 }
10648 
10649 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10650 		uint8_t *edid_ext, int len,
10651 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10652 {
10653 	int i;
10654 
10655 	/* send extension block to DMCU for parsing */
10656 	for (i = 0; i < len; i += 8) {
10657 		bool res;
10658 		int offset;
10659 
		/* send 8 bytes at a time */
10661 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10662 			return false;
10663 
		if (i + 8 == len) {
			/* entire EDID block sent; expect the parse result */
10666 			int version, min_rate, max_rate;
10667 
10668 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10669 			if (res) {
10670 				/* amd vsdb found */
10671 				vsdb_info->freesync_supported = 1;
10672 				vsdb_info->amd_vsdb_version = version;
10673 				vsdb_info->min_refresh_rate_hz = min_rate;
10674 				vsdb_info->max_refresh_rate_hz = max_rate;
10675 				return true;
10676 			}
10677 			/* not amd vsdb */
10678 			return false;
10679 		}
10680 
		/* check for ack */
10682 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10683 		if (!res)
10684 			return false;
10685 	}
10686 
10687 	return false;
10688 }
10689 
10690 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10691 		uint8_t *edid_ext, int len,
10692 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10693 {
10694 	int i;
10695 
	/* send extension block to DMUB for parsing */
10697 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
10699 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10700 			return false;
10701 	}
10702 
10703 	return vsdb_info->freesync_supported;
10704 }
10705 
10706 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10707 		uint8_t *edid_ext, int len,
10708 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10709 {
10710 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10711 
10712 	if (adev->dm.dmub_srv)
10713 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10714 	else
10715 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10716 }
10717 
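/*
 * Locate the CEA extension block in the EDID and run it through the AMD
 * VSDB parser. Returns the CEA extension index on success, -ENODEV if no
 * EDID, no CEA extension, or no valid VSDB was found.
 */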
10718 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10719 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10720 {
10721 	uint8_t *edid_ext = NULL;
10722 	int i;
10723 	bool valid_vsdb_found = false;
10724 
10725 	/*----- drm_find_cea_extension() -----*/
10726 	/* No EDID or EDID extensions */
10727 	if (edid == NULL || edid->extensions == 0)
10728 		return -ENODEV;
10729 
10730 	/* Find CEA extension */
10731 	for (i = 0; i < edid->extensions; i++) {
10732 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10733 		if (edid_ext[0] == CEA_EXT)
10734 			break;
10735 	}
10736 
10737 	if (i == edid->extensions)
10738 		return -ENODEV;
10739 
10740 	/*----- cea_db_offsets() -----*/
10741 	if (edid_ext[0] != CEA_EXT)
10742 		return -ENODEV;
10743 
10744 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10745 
10746 	return valid_vsdb_found ? i : -ENODEV;
10747 }
10748 
10749 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10750 					struct edid *edid)
10751 {
10752 	int i = 0;
10753 	struct detailed_timing *timing;
10754 	struct detailed_non_pixel *data;
10755 	struct detailed_data_monitor_range *range;
10756 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10757 			to_amdgpu_dm_connector(connector);
10758 	struct dm_connector_state *dm_con_state = NULL;
10759 
10760 	struct drm_device *dev = connector->dev;
10761 	struct amdgpu_device *adev = drm_to_adev(dev);
10762 	bool freesync_capable = false;
10763 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10764 
10765 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
10767 		goto update;
10768 	}
10769 
10770 	if (!edid) {
10771 		dm_con_state = to_dm_connector_state(connector->state);
10772 
10773 		amdgpu_dm_connector->min_vfreq = 0;
10774 		amdgpu_dm_connector->max_vfreq = 0;
10775 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10776 
10777 		goto update;
10778 	}
10779 
10780 	dm_con_state = to_dm_connector_state(connector->state);
10781 
10782 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10784 		goto update;
10785 	}
	if (!adev->dm.freesync_module)
		goto update;

10790 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10791 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10792 		bool edid_check_required = false;
10793 
10794 		if (edid) {
10795 			edid_check_required = is_dp_capable_without_timing_msa(
10796 						adev->dm.dc,
10797 						amdgpu_dm_connector);
10798 		}
10799 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
10804 				timing	= &edid->detailed_timings[i];
10805 				data	= &timing->data.other_data;
10806 				range	= &data->data.range;
10807 				/*
10808 				 * Check if monitor has continuous frequency mode
10809 				 */
10810 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10811 					continue;
10812 				/*
10813 				 * Check for flag range limits only. If flag == 1 then
10814 				 * no additional timing information provided.
10815 				 * Default GTF, GTF Secondary curve and CVT are not
10816 				 * supported
10817 				 */
10818 				if (range->flags != 1)
10819 					continue;
10820 
10821 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10822 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10823 				amdgpu_dm_connector->pixel_clock_mhz =
10824 					range->pixel_clock_mhz * 10;
10825 
10826 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10827 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10828 
10829 				break;
10830 			}
10831 
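			/*
			 * Treat the display as FreeSync capable only if the
			 * reported refresh range spans more than 10 Hz;
			 * narrower ranges leave little room for VRR.
			 */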
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10837 		}
10838 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10839 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10840 		if (i >= 0 && vsdb_info.freesync_supported) {
10841 			timing  = &edid->detailed_timings[i];
10842 			data    = &timing->data.other_data;
10843 
10844 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10845 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10846 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10847 				freesync_capable = true;
10848 
10849 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10850 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10851 		}
10852 	}
10853 
10854 update:
10855 	if (dm_con_state)
10856 		dm_con_state->freesync_capable = freesync_capable;
10857 
10858 	if (connector->vrr_capable_property)
10859 		drm_connector_set_vrr_capable_property(connector,
10860 						       freesync_capable);
10861 }
10862 
10863 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10864 {
10865 	struct amdgpu_device *adev = drm_to_adev(dev);
10866 	struct dc *dc = adev->dm.dc;
10867 	int i;
10868 
10869 	mutex_lock(&adev->dm.dc_lock);
10870 	if (dc->current_state) {
10871 		for (i = 0; i < dc->current_state->stream_count; ++i)
10872 			dc->current_state->streams[i]
10873 				->triggered_crtc_reset.enabled =
10874 				adev->dm.force_timing_sync;
10875 
10876 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10877 		dc_trigger_sync(dc, dc->current_state);
10878 	}
10879 	mutex_unlock(&adev->dm.dc_lock);
10880 }
10881 
10882 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10883 		       uint32_t value, const char *func_name)
10884 {
10885 #ifdef DM_CHECK_ADDR_0
10886 	if (address == 0) {
10887 		DC_ERR("invalid register write. address = 0");
10888 		return;
10889 	}
10890 #endif
10891 	cgs_write_register(ctx->cgs_device, address, value);
10892 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10893 }
10894 
10895 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10896 			  const char *func_name)
10897 {
10898 	uint32_t value;
10899 #ifdef DM_CHECK_ADDR_0
10900 	if (address == 0) {
10901 		DC_ERR("invalid register read; address = 0\n");
10902 		return 0;
10903 	}
10904 #endif
10905 
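	/*
	 * Register reads cannot be serviced while writes are being gathered
	 * for DMUB offload (the gathered writes have not reached the hardware
	 * yet), so flag the attempt and return 0.
	 */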
10906 	if (ctx->dmub_srv &&
10907 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10908 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10909 		ASSERT(false);
10910 		return 0;
10911 	}
10912 
10913 	value = cgs_read_register(ctx->cgs_device, address);
10914 
10915 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10916 
10917 	return value;
10918 }
10919 
10920 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10921 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10922 {
10923 	struct amdgpu_device *adev = ctx->driver_context;
10924 	int ret = 0;
10925 
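	/*
	 * Kick off the AUX transfer on the DMUB and block until the notify
	 * handler signals completion, with a 10 second timeout.
	 */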
10926 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret <= 0) {
		/* Treat an interrupted wait as a failure too; the notify
		 * data is only valid once the completion has been signaled.
		 */
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
10932 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10933 
10934 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
10935 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
10936 
		/* For a read, copy the reply data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
10942 	}
10943 
10944 	return adev->dm.dmub_notify->aux_reply.length;
10945 }
10946