1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 
62 #include "ivsrcid/ivsrcid_vislands30.h"
63 
64 #include "i2caux_interface.h"
65 #include <linux/module.h>
66 #include <linux/moduleparam.h>
67 #include <linux/types.h>
68 #include <linux/pm_runtime.h>
69 #include <linux/pci.h>
70 #include <linux/firmware.h>
71 #include <linux/component.h>
72 
73 #include <drm/drm_atomic.h>
74 #include <drm/drm_atomic_uapi.h>
75 #include <drm/drm_atomic_helper.h>
76 #include <drm/drm_dp_mst_helper.h>
77 #include <drm/drm_fb_helper.h>
78 #include <drm/drm_fourcc.h>
79 #include <drm/drm_edid.h>
80 #include <drm/drm_vblank.h>
81 #include <drm/drm_audio_component.h>
82 
83 #if defined(CONFIG_DRM_AMD_DC_DCN)
84 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
85 
86 #include "dcn/dcn_1_0_offset.h"
87 #include "dcn/dcn_1_0_sh_mask.h"
88 #include "soc15_hw_ip.h"
89 #include "vega10_ip_offset.h"
90 
91 #include "soc15_common.h"
92 #endif
93 
94 #include "modules/inc/mod_freesync.h"
95 #include "modules/power/power_helpers.h"
96 #include "modules/inc/mod_info_packet.h"
97 
98 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
100 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
102 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
104 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
106 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
108 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
110 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
112 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 #endif
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
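/* Map the DC dongle type from the link's DPCD caps to the DRM subconnector
 * property value exposed to userspace.
 */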
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
183  * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
217 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
218 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
219 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
220 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
221 
222 static const struct drm_format_info *
223 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
224 
225 static bool
226 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
227 				 struct drm_crtc_state *new_crtc_state);
228 /*
229  * dm_vblank_get_counter
230  *
231  * @brief
232  * Get counter for number of vertical blanks
233  *
234  * @param
235  * struct amdgpu_device *adev - [in] desired amdgpu device
236  * int disp_idx - [in] which CRTC to get the counter from
237  *
238  * @return
239  * Counter for vertical blanks
240  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
257 
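/* Read back the current scanout position and vblank extents for a CRTC,
 * packed in the legacy two-halfword register format expected by the base
 * driver.
 */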
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	/*
	 * TODO: rework the base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
290 
291 static bool dm_is_idle(void *handle)
292 {
293 	/* XXX todo */
294 	return true;
295 }
296 
297 static int dm_wait_for_idle(void *handle)
298 {
299 	/* XXX todo */
300 	return 0;
301 }
302 
303 static bool dm_check_soft_reset(void *handle)
304 {
305 	return false;
306 }
307 
308 static int dm_soft_reset(void *handle)
309 {
310 	/* XXX todo */
311 	return 0;
312 }
313 
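/* Find the amdgpu_crtc driven by the given OTG instance. Falls back to the
 * first CRTC (with a warning) if the interrupt source reports no instance.
 */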
314 static struct amdgpu_crtc *
315 get_crtc_by_otg_inst(struct amdgpu_device *adev,
316 		     int otg_inst)
317 {
318 	struct drm_device *dev = adev_to_drm(adev);
319 	struct drm_crtc *crtc;
320 	struct amdgpu_crtc *amdgpu_crtc;
321 
322 	if (WARN_ON(otg_inst == -1))
323 		return adev->mode_info.crtcs[0];
324 
325 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
326 		amdgpu_crtc = to_amdgpu_crtc(crtc);
327 
328 		if (amdgpu_crtc->otg_inst == otg_inst)
329 			return amdgpu_crtc;
330 	}
331 
332 	return NULL;
333 }
334 
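/* IRQ-safe check whether VRR is active on this CRTC, using the
 * interrupt-context copy of the freesync state in dm_irq_params.
 */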
335 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
336 {
337 	return acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_VARIABLE ||
339 	       acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_FIXED;
341 }
342 
343 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
344 {
345 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
346 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
347 }
348 
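/* DC needs a vmin/vmax timing adjustment whenever fixed-rate VRR is entered
 * or the VRR active state toggles between the old and new CRTC state.
 */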
349 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
350 					      struct dm_crtc_state *new_state)
351 {
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
353 		return true;
354 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
355 		return true;
356 	else
357 		return false;
358 }
359 
360 /**
361  * dm_pflip_high_irq() - Handle pageflip interrupt
362  * @interrupt_params: ignored
363  *
364  * Handles the pageflip interrupt by notifying all interested parties
365  * that the pageflip has been completed.
366  */
367 static void dm_pflip_high_irq(void *interrupt_params)
368 {
369 	struct amdgpu_crtc *amdgpu_crtc;
370 	struct common_irq_params *irq_params = interrupt_params;
371 	struct amdgpu_device *adev = irq_params->adev;
372 	unsigned long flags;
373 	struct drm_pending_vblank_event *e;
374 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
375 	bool vrr_active;
376 
377 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
378 
	/* An IRQ can fire while we are still in the initial bring-up stage.
	 * TODO: work and BO cleanup.
	 */
	if (amdgpu_crtc == NULL) {
382 		DC_LOG_PFLIP("CRTC is null, returning.\n");
383 		return;
384 	}
385 
386 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
387 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
394 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
395 		return;
396 	}
397 
398 	/* page flip completed. */
399 	e = amdgpu_crtc->event;
400 	amdgpu_crtc->event = NULL;
401 
402 	WARN_ON(!e);
403 
404 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
405 
406 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
407 	if (!vrr_active ||
408 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
409 				      &v_blank_end, &hpos, &vpos) ||
410 	    (vpos < v_blank_start)) {
411 		/* Update to correct count and vblank timestamp if racing with
412 		 * vblank irq. This also updates to the correct vblank timestamp
413 		 * even in VRR mode, as scanout is past the front-porch atm.
414 		 */
415 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
416 
417 		/* Wake up userspace by sending the pageflip event with proper
418 		 * count and timestamp of vblank of flip completion.
419 		 */
420 		if (e) {
421 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
422 
423 			/* Event sent, so done with vblank for this flip */
424 			drm_crtc_vblank_put(&amdgpu_crtc->base);
425 		}
426 	} else if (e) {
427 		/* VRR active and inside front-porch: vblank count and
428 		 * timestamp for pageflip event will only be up to date after
429 		 * drm_crtc_handle_vblank() has been executed from late vblank
430 		 * irq handler after start of back-porch (vline 0). We queue the
431 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
432 		 * updated timestamp and count, once it runs after us.
433 		 *
434 		 * We need to open-code this instead of using the helper
435 		 * drm_crtc_arm_vblank_event(), as that helper would
436 		 * call drm_crtc_accurate_vblank_count(), which we must
437 		 * not call in VRR mode while we are in front-porch!
438 		 */
439 
440 		/* sequence will be replaced by real count during send-out. */
441 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
442 		e->pipe = amdgpu_crtc->crtc_id;
443 
444 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
445 		e = NULL;
446 	}
447 
	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as that one is incremented at the start of
	 * the vblank of pageflip completion, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is
	 * enabled.
	 */
453 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
454 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
455 
456 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
457 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
458 
459 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
460 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
461 		     vrr_active, (int) !e);
462 }
463 
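/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate for tracing and, in VRR mode, performs
 * the core vblank handling after the end of the front-porch, including BTR
 * processing on pre-DCE12 ASICs.
 */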
464 static void dm_vupdate_high_irq(void *interrupt_params)
465 {
466 	struct common_irq_params *irq_params = interrupt_params;
467 	struct amdgpu_device *adev = irq_params->adev;
468 	struct amdgpu_crtc *acrtc;
469 	struct drm_device *drm_dev;
470 	struct drm_vblank_crtc *vblank;
471 	ktime_t frame_duration_ns, previous_timestamp;
472 	unsigned long flags;
473 	int vrr_active;
474 
475 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
476 
477 	if (acrtc) {
478 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
479 		drm_dev = acrtc->base.dev;
480 		vblank = &drm_dev->vblank[acrtc->base.index];
481 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
482 		frame_duration_ns = vblank->time - previous_timestamp;
483 
484 		if (frame_duration_ns > 0) {
485 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
486 						frame_duration_ns,
487 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
488 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
489 		}
490 
491 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
492 			      acrtc->crtc_id,
493 			      vrr_active);
494 
		/* Core vblank handling is done here after the end of the
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results after the front-porch. This also delivers
		 * any page-flip completion events that were queued to us
		 * because the pageflip happened inside the front-porch.
		 */
501 		if (vrr_active) {
502 			drm_crtc_handle_vblank(&acrtc->base);
503 
504 			/* BTR processing for pre-DCE12 ASICs */
505 			if (acrtc->dm_irq_params.stream &&
506 			    adev->family < AMDGPU_FAMILY_AI) {
507 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
508 				mod_freesync_handle_v_update(
509 				    adev->dm.freesync_module,
510 				    acrtc->dm_irq_params.stream,
511 				    &acrtc->dm_irq_params.vrr_params);
512 
513 				dc_stream_adjust_vmin_vmax(
514 				    adev->dm.dc,
515 				    acrtc->dm_irq_params.stream,
516 				    &acrtc->dm_irq_params.vrr_params.adjust);
517 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
518 			}
519 		}
520 	}
521 }
522 
523 /**
524  * dm_crtc_high_irq() - Handles CRTC interrupt
525  * @interrupt_params: used for determining the CRTC instance
526  *
527  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
528  * event handler.
529  */
530 static void dm_crtc_high_irq(void *interrupt_params)
531 {
532 	struct common_irq_params *irq_params = interrupt_params;
533 	struct amdgpu_device *adev = irq_params->adev;
534 	struct amdgpu_crtc *acrtc;
535 	unsigned long flags;
536 	int vrr_active;
537 
538 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
539 	if (!acrtc)
540 		return;
541 
542 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
543 
544 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
545 		      vrr_active, acrtc->dm_irq_params.active_planes);
546 
547 	/**
548 	 * Core vblank handling at start of front-porch is only possible
549 	 * in non-vrr mode, as only there vblank timestamping will give
550 	 * valid results while done in front-porch. Otherwise defer it
551 	 * to dm_vupdate_high_irq after end of front-porch.
552 	 */
553 	if (!vrr_active)
554 		drm_crtc_handle_vblank(&acrtc->base);
555 
556 	/**
557 	 * Following stuff must happen at start of vblank, for crc
558 	 * computation and below-the-range btr support in vrr mode.
559 	 */
560 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
561 
562 	/* BTR updates need to happen before VUPDATE on Vega and above. */
563 	if (adev->family < AMDGPU_FAMILY_AI)
564 		return;
565 
566 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
567 
568 	if (acrtc->dm_irq_params.stream &&
569 	    acrtc->dm_irq_params.vrr_params.supported &&
570 	    acrtc->dm_irq_params.freesync_config.state ==
571 		    VRR_STATE_ACTIVE_VARIABLE) {
572 		mod_freesync_handle_v_update(adev->dm.freesync_module,
573 					     acrtc->dm_irq_params.stream,
574 					     &acrtc->dm_irq_params.vrr_params);
575 
576 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
577 					   &acrtc->dm_irq_params.vrr_params.adjust);
578 	}
579 
580 	/*
581 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
582 	 * In that case, pageflip completion interrupts won't fire and pageflip
583 	 * completion events won't get delivered. Prevent this by sending
584 	 * pending pageflip events from here if a flip is still pending.
585 	 *
586 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
587 	 * avoid race conditions between flip programming and completion,
588 	 * which could cause too early flip completion events.
589 	 */
590 	if (adev->family >= AMDGPU_FAMILY_RV &&
591 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
592 	    acrtc->dm_irq_params.active_planes == 0) {
593 		if (acrtc->event) {
594 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
595 			acrtc->event = NULL;
596 			drm_crtc_vblank_put(&acrtc->base);
597 		}
598 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
599 	}
600 
601 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
602 }
603 
604 #if defined(CONFIG_DRM_AMD_DC_DCN)
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615 	struct common_irq_params *irq_params = interrupt_params;
616 	struct amdgpu_device *adev = irq_params->adev;
617 	struct amdgpu_crtc *acrtc;
618 
619 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620 
621 	if (!acrtc)
622 		return;
623 
624 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif
627 
628 /**
629  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
630  * @interrupt_params: used for determining the Outbox instance
631  *
632  * Handles the Outbox Interrupt
633  * event handler.
634  */
635 #define DMUB_TRACE_MAX_READ 64
636 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
637 {
638 	struct dmub_notification notify;
639 	struct common_irq_params *irq_params = interrupt_params;
640 	struct amdgpu_device *adev = irq_params->adev;
641 	struct amdgpu_display_manager *dm = &adev->dm;
642 	struct dmcub_trace_buf_entry entry = { 0 };
643 	uint32_t count = 0;
644 
645 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
646 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
647 			do {
648 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
649 			} while (notify.pending_notification);
650 
651 			if (adev->dm.dmub_notify)
652 				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
653 			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
654 				complete(&adev->dm.dmub_aux_transfer_done);
			/* TODO: HPD implementation */
656 
657 		} else {
658 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
659 		}
660 	}
661 
662 
663 	do {
664 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
665 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
666 							entry.param0, entry.param1);
667 
668 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
669 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
670 		} else
671 			break;
672 
673 		count++;
674 
	} while (count < DMUB_TRACE_MAX_READ);
676 
677 	ASSERT(count <= DMUB_TRACE_MAX_READ);
678 }
679 #endif
680 
681 static int dm_set_clockgating_state(void *handle,
682 		  enum amd_clockgating_state state)
683 {
684 	return 0;
685 }
686 
687 static int dm_set_powergating_state(void *handle,
688 		  enum amd_powergating_state state)
689 {
690 	return 0;
691 }
692 
/* Prototypes of private functions */
static int dm_early_init(void *handle);
695 
/* Allocate memory for FBC compressed data */
697 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
698 {
699 	struct drm_device *dev = connector->dev;
700 	struct amdgpu_device *adev = drm_to_adev(dev);
701 	struct dm_compressor_info *compressor = &adev->dm.compressor;
702 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
703 	struct drm_display_mode *mode;
704 	unsigned long max_size = 0;
705 
706 	if (adev->dm.dc->fbc_compressor == NULL)
707 		return;
708 
709 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
710 		return;
711 
712 	if (compressor->bo_ptr)
713 		return;
714 
715 
716 	list_for_each_entry(mode, &connector->modes, head) {
717 		if (max_size < mode->htotal * mode->vtotal)
718 			max_size = mode->htotal * mode->vtotal;
719 	}
720 
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
736 
737 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
738 					  int pipe, bool *enabled,
739 					  unsigned char *buf, int max_bytes)
740 {
741 	struct drm_device *dev = dev_get_drvdata(kdev);
742 	struct amdgpu_device *adev = drm_to_adev(dev);
743 	struct drm_connector *connector;
744 	struct drm_connector_list_iter conn_iter;
745 	struct amdgpu_dm_connector *aconnector;
746 	int ret = 0;
747 
748 	*enabled = false;
749 
750 	mutex_lock(&adev->dm.audio_lock);
751 
752 	drm_connector_list_iter_begin(dev, &conn_iter);
753 	drm_for_each_connector_iter(connector, &conn_iter) {
754 		aconnector = to_amdgpu_dm_connector(connector);
755 		if (aconnector->audio_inst != port)
756 			continue;
757 
758 		*enabled = true;
759 		ret = drm_eld_size(connector->eld);
760 		memcpy(buf, connector->eld, min(max_bytes, ret));
761 
762 		break;
763 	}
764 	drm_connector_list_iter_end(&conn_iter);
765 
766 	mutex_unlock(&adev->dm.audio_lock);
767 
768 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
769 
770 	return ret;
771 }
772 
773 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
774 	.get_eld = amdgpu_dm_audio_component_get_eld,
775 };
776 
777 static int amdgpu_dm_audio_component_bind(struct device *kdev,
778 				       struct device *hda_kdev, void *data)
779 {
780 	struct drm_device *dev = dev_get_drvdata(kdev);
781 	struct amdgpu_device *adev = drm_to_adev(dev);
782 	struct drm_audio_component *acomp = data;
783 
784 	acomp->ops = &amdgpu_dm_audio_component_ops;
785 	acomp->dev = kdev;
786 	adev->dm.audio_component = acomp;
787 
788 	return 0;
789 }
790 
791 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
792 					  struct device *hda_kdev, void *data)
793 {
794 	struct drm_device *dev = dev_get_drvdata(kdev);
795 	struct amdgpu_device *adev = drm_to_adev(dev);
796 	struct drm_audio_component *acomp = data;
797 
798 	acomp->ops = NULL;
799 	acomp->dev = NULL;
800 	adev->dm.audio_component = NULL;
801 }
802 
803 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
804 	.bind	= amdgpu_dm_audio_component_bind,
805 	.unbind	= amdgpu_dm_audio_component_unbind,
806 };
807 
808 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
809 {
810 	int i, ret;
811 
812 	if (!amdgpu_audio)
813 		return 0;
814 
815 	adev->mode_info.audio.enabled = true;
816 
817 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
818 
819 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
820 		adev->mode_info.audio.pin[i].channels = -1;
821 		adev->mode_info.audio.pin[i].rate = -1;
822 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
823 		adev->mode_info.audio.pin[i].status_bits = 0;
824 		adev->mode_info.audio.pin[i].category_code = 0;
825 		adev->mode_info.audio.pin[i].connected = false;
826 		adev->mode_info.audio.pin[i].id =
827 			adev->dm.dc->res_pool->audios[i]->inst;
828 		adev->mode_info.audio.pin[i].offset = 0;
829 	}
830 
831 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
832 	if (ret < 0)
833 		return ret;
834 
835 	adev->dm.audio_registered = true;
836 
837 	return 0;
838 }
839 
840 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
841 {
842 	if (!amdgpu_audio)
843 		return;
844 
845 	if (!adev->mode_info.audio.enabled)
846 		return;
847 
848 	if (adev->dm.audio_registered) {
849 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
850 		adev->dm.audio_registered = false;
851 	}
852 
853 	/* TODO: Disable audio? */
854 
855 	adev->mode_info.audio.enabled = false;
856 }
857 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
859 {
860 	struct drm_audio_component *acomp = adev->dm.audio_component;
861 
862 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
863 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
864 
865 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
866 						 pin, -1);
867 	}
868 }
869 
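/* Copy the DMUB firmware and VBIOS into their framebuffer regions, program
 * the hardware parameters and bring up the DMUB service. Returns 0 without
 * doing anything when DMUB is not supported on the ASIC.
 */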
870 static int dm_dmub_hw_init(struct amdgpu_device *adev)
871 {
872 	const struct dmcub_firmware_header_v1_0 *hdr;
873 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
874 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
875 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
876 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
877 	struct abm *abm = adev->dm.dc->res_pool->abm;
878 	struct dmub_srv_hw_params hw_params;
879 	enum dmub_status status;
880 	const unsigned char *fw_inst_const, *fw_bss_data;
881 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
882 	bool has_hw_support;
883 
884 	if (!dmub_srv)
885 		/* DMUB isn't supported on the ASIC. */
886 		return 0;
887 
888 	if (!fb_info) {
889 		DRM_ERROR("No framebuffer info for DMUB service.\n");
890 		return -EINVAL;
891 	}
892 
893 	if (!dmub_fw) {
894 		/* Firmware required for DMUB support. */
895 		DRM_ERROR("No firmware provided for DMUB.\n");
896 		return -EINVAL;
897 	}
898 
899 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
900 	if (status != DMUB_STATUS_OK) {
901 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
902 		return -EINVAL;
903 	}
904 
905 	if (!has_hw_support) {
906 		DRM_INFO("DMUB unsupported on ASIC\n");
907 		return 0;
908 	}
909 
910 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
911 
912 	fw_inst_const = dmub_fw->data +
913 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
914 			PSP_HEADER_BYTES;
915 
916 	fw_bss_data = dmub_fw->data +
917 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
918 		      le32_to_cpu(hdr->inst_const_bytes);
919 
920 	/* Copy firmware and bios info into FB memory. */
921 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
922 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
923 
924 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
925 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
	 * fw_inst_const part to cw0; otherwise, the firmware back door
	 * load is done here by dm_dmub_hw_init.
	 */
931 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
932 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
933 				fw_inst_const_size);
934 	}
935 
936 	if (fw_bss_data_size)
937 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
938 		       fw_bss_data, fw_bss_data_size);
939 
940 	/* Copy firmware bios info into FB memory. */
941 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
942 	       adev->bios_size);
943 
944 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
947 
948 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
949 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
950 
951 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
952 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
953 
954 	/* Initialize hardware. */
955 	memset(&hw_params, 0, sizeof(hw_params));
956 	hw_params.fb_base = adev->gmc.fb_start;
957 	hw_params.fb_offset = adev->gmc.aper_base;
958 
959 	/* backdoor load firmware and trigger dmub running */
960 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
961 		hw_params.load_inst_const = true;
962 
963 	if (dmcu)
964 		hw_params.psp_version = dmcu->psp_version;
965 
966 	for (i = 0; i < fb_info->num_fb; ++i)
967 		hw_params.fb[i] = &fb_info->fb[i];
968 
969 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
970 	if (status != DMUB_STATUS_OK) {
971 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
972 		return -EINVAL;
973 	}
974 
975 	/* Wait for firmware load to finish. */
976 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
977 	if (status != DMUB_STATUS_OK)
978 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
979 
980 	/* Init DMCU and ABM if available. */
981 	if (dmcu && abm) {
982 		dmcu->funcs->dmcu_init(dmcu);
983 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
984 	}
985 
986 	if (!adev->dm.dc->ctx->dmub_srv)
987 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
988 	if (!adev->dm.dc->ctx->dmub_srv) {
989 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
990 		return -ENOMEM;
991 	}
992 
993 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
994 		 adev->dm.dmcub_fw_version);
995 
996 	return 0;
997 }
998 
999 #if defined(CONFIG_DRM_AMD_DC_DCN)
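/* Fill the DC physical address space configuration (system aperture, AGP
 * range and GART page table location) from the GMC state, so DC can program
 * the DCN address translation for scanout from system memory.
 */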
1000 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1001 {
1002 	uint64_t pt_base;
1003 	uint32_t logical_addr_low;
1004 	uint32_t logical_addr_high;
1005 	uint32_t agp_base, agp_bot, agp_top;
1006 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1007 
1008 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1009 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1010 
	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue where it cannot use VRAM that lies
		 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * increase the system aperture high address (by 1) to get
		 * rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1019 	else
1020 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1021 
1022 	agp_base = 0;
1023 	agp_bot = adev->gmc.agp_start >> 24;
1024 	agp_top = adev->gmc.agp_end >> 24;
1025 
1026 
1027 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1028 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1029 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1030 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1031 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1032 	page_table_base.low_part = lower_32_bits(pt_base);
1033 
1034 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1035 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1036 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1038 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1039 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1040 
1041 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1042 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1043 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1044 
1045 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1046 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1047 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1048 
	pa_config->is_hvm_enabled = 0;
}
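
/* Deferred work that tracks how many CRTCs have vblank interrupts enabled
 * and allows DC idle optimizations (MALL stutter) only when that count is
 * zero.
 */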
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1058 	struct amdgpu_display_manager *dm = vblank_work->dm;
1059 
1060 	mutex_lock(&dm->dc_lock);
1061 
1062 	if (vblank_work->enable)
1063 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1065 		dm->active_vblank_irq_count--;
1066 
1067 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1068 
1069 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1070 
1071 	mutex_unlock(&dm->dc_lock);
1072 }
1073 
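/* Allocate one mall_work item per link so vblank enable/disable events can
 * be handled out of interrupt context.
 */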
static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
1078 	struct vblank_workqueue *vblank_work;
1079 	int i = 0;
1080 
1081 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1082 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1083 		kfree(vblank_work);
1084 		return NULL;
1085 	}
1086 
1087 	for (i = 0; i < max_caps; i++)
1088 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1089 
1090 	return vblank_work;
1091 }
1092 #endif
1093 static int amdgpu_dm_init(struct amdgpu_device *adev)
1094 {
1095 	struct dc_init_data init_data;
1096 #ifdef CONFIG_DRM_AMD_DC_HDCP
1097 	struct dc_callback_init init_params;
1098 #endif
1099 	int r;
1100 
1101 	adev->dm.ddev = adev_to_drm(adev);
1102 	adev->dm.adev = adev;
1103 
1104 	/* Zero all the fields */
1105 	memset(&init_data, 0, sizeof(init_data));
1106 #ifdef CONFIG_DRM_AMD_DC_HDCP
1107 	memset(&init_params, 0, sizeof(init_params));
1108 #endif
1109 
1110 	mutex_init(&adev->dm.dc_lock);
1111 	mutex_init(&adev->dm.audio_lock);
1112 #if defined(CONFIG_DRM_AMD_DC_DCN)
1113 	spin_lock_init(&adev->dm.vblank_lock);
1114 #endif
1115 
	if (amdgpu_dm_irq_init(adev)) {
1117 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1118 		goto error;
1119 	}
1120 
1121 	init_data.asic_id.chip_family = adev->family;
1122 
1123 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1124 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1125 
1126 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1127 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1128 	init_data.asic_id.atombios_base_address =
1129 		adev->mode_info.atom_context->bios;
1130 
1131 	init_data.driver = adev;
1132 
1133 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1134 
1135 	if (!adev->dm.cgs_device) {
1136 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1137 		goto error;
1138 	}
1139 
1140 	init_data.cgs_device = adev->dm.cgs_device;
1141 
1142 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1143 
1144 	switch (adev->asic_type) {
1145 	case CHIP_CARRIZO:
1146 	case CHIP_STONEY:
1147 	case CHIP_RAVEN:
1148 	case CHIP_RENOIR:
1149 		init_data.flags.gpu_vm_support = true;
1150 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1151 			init_data.flags.disable_dmcu = true;
1152 		break;
1153 #if defined(CONFIG_DRM_AMD_DC_DCN)
1154 	case CHIP_VANGOGH:
1155 		init_data.flags.gpu_vm_support = true;
1156 		break;
1157 #endif
1158 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1159 	case CHIP_YELLOW_CARP:
1160 		init_data.flags.gpu_vm_support = true;
1161 		break;
1162 #endif
1163 	default:
1164 		break;
1165 	}
1166 
1167 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1168 		init_data.flags.fbc_support = true;
1169 
1170 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1171 		init_data.flags.multi_mon_pp_mclk_switch = true;
1172 
1173 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1174 		init_data.flags.disable_fractional_pwm = true;
1175 
1176 	init_data.flags.power_down_display_on_boot = true;
1177 
1178 	INIT_LIST_HEAD(&adev->dm.da_list);
1179 	/* Display Core create. */
1180 	adev->dm.dc = dc_create(&init_data);
1181 
1182 	if (adev->dm.dc) {
1183 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1184 	} else {
1185 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1186 		goto error;
1187 	}
1188 
1189 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1190 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1191 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1192 	}
1193 
1194 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1196 
1197 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1198 		adev->dm.dc->debug.disable_stutter = true;
1199 
1200 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1201 		adev->dm.dc->debug.disable_dsc = true;
1202 
1203 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1204 		adev->dm.dc->debug.disable_clock_gate = true;
1205 
1206 	r = dm_dmub_hw_init(adev);
1207 	if (r) {
1208 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1209 		goto error;
1210 	}
1211 
1212 	dc_hardware_init(adev->dm.dc);
1213 
1214 #if defined(CONFIG_DRM_AMD_DC_DCN)
1215 	if (adev->apu_flags) {
1216 		struct dc_phy_addr_space_config pa_config;
1217 
1218 		mmhub_read_system_context(adev, &pa_config);
1219 
		/* Call the DC init_memory func */
1221 		dc_setup_system_context(adev->dm.dc, &pa_config);
1222 	}
1223 #endif
1224 
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1232 
1233 	amdgpu_dm_init_color_mod();
1234 
1235 #if defined(CONFIG_DRM_AMD_DC_DCN)
1236 	if (adev->dm.dc->caps.max_links > 0) {
1237 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1238 
1239 		if (!adev->dm.vblank_workqueue)
1240 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1241 		else
1242 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1243 	}
1244 #endif
1245 
1246 #ifdef CONFIG_DRM_AMD_DC_HDCP
1247 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1248 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1249 
1250 		if (!adev->dm.hdcp_workqueue)
1251 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1252 		else
1253 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1254 
1255 		dc_init_callbacks(adev->dm.dc, &init_params);
1256 	}
1257 #endif
1258 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1259 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1260 #endif
1261 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1262 		init_completion(&adev->dm.dmub_aux_transfer_done);
1263 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1264 		if (!adev->dm.dmub_notify) {
1265 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1266 			goto error;
1267 		}
1268 		amdgpu_dm_outbox_init(adev);
1269 	}
1270 
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1276 
1277 	/* create fake encoders for MST */
1278 	dm_dp_create_fake_mst_encoders(adev);
1279 
1280 	/* TODO: Add_display_info? */
1281 
1282 	/* TODO use dynamic cursor width */
1283 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1284 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1285 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");
1294 
1295 	return 0;
1296 error:
1297 	amdgpu_dm_fini(adev);
1298 
1299 	return -EINVAL;
1300 }
1301 
1302 static int amdgpu_dm_early_fini(void *handle)
1303 {
1304 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 
1306 	amdgpu_dm_audio_fini(adev);
1307 
1308 	return 0;
1309 }
1310 
1311 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1312 {
1313 	int i;
1314 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1318 
1319 	amdgpu_dm_destroy_drm_device(&adev->dm);
1320 
1321 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1322 	if (adev->dm.crc_rd_wrk) {
1323 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1324 		kfree(adev->dm.crc_rd_wrk);
1325 		adev->dm.crc_rd_wrk = NULL;
1326 	}
1327 #endif
1328 #ifdef CONFIG_DRM_AMD_DC_HDCP
1329 	if (adev->dm.hdcp_workqueue) {
1330 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1331 		adev->dm.hdcp_workqueue = NULL;
1332 	}
1333 
1334 	if (adev->dm.dc)
1335 		dc_deinit_callbacks(adev->dm.dc);
1336 #endif
1337 
1338 #if defined(CONFIG_DRM_AMD_DC_DCN)
1339 	if (adev->dm.vblank_workqueue) {
1340 		adev->dm.vblank_workqueue->dm = NULL;
1341 		kfree(adev->dm.vblank_workqueue);
1342 		adev->dm.vblank_workqueue = NULL;
1343 	}
1344 #endif
1345 
1346 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1347 
1348 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1349 		kfree(adev->dm.dmub_notify);
1350 		adev->dm.dmub_notify = NULL;
1351 	}
1352 
1353 	if (adev->dm.dmub_bo)
1354 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1355 				      &adev->dm.dmub_bo_gpu_addr,
1356 				      &adev->dm.dmub_bo_cpu_addr);
1357 
1358 	/* DC Destroy TODO: Replace destroy DAL */
1359 	if (adev->dm.dc)
1360 		dc_destroy(&adev->dm.dc);
1361 	/*
1362 	 * TODO: pageflip, vlank interrupt
1363 	 *
1364 	 * amdgpu_dm_irq_fini(adev);
1365 	 */
1366 
1367 	if (adev->dm.cgs_device) {
1368 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1369 		adev->dm.cgs_device = NULL;
1370 	}
1371 	if (adev->dm.freesync_module) {
1372 		mod_freesync_destroy(adev->dm.freesync_module);
1373 		adev->dm.freesync_module = NULL;
1374 	}
1375 
1376 	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
1380 }
1381 
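/* Request the DMCU firmware for the ASICs that need it and register it for
 * PSP loading; ASICs without DMCU firmware simply return success.
 */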
1382 static int load_dmcu_fw(struct amdgpu_device *adev)
1383 {
1384 	const char *fw_name_dmcu = NULL;
1385 	int r;
1386 	const struct dmcu_firmware_header_v1_0 *hdr;
1387 
	switch (adev->asic_type) {
1389 #if defined(CONFIG_DRM_AMD_DC_SI)
1390 	case CHIP_TAHITI:
1391 	case CHIP_PITCAIRN:
1392 	case CHIP_VERDE:
1393 	case CHIP_OLAND:
1394 #endif
1395 	case CHIP_BONAIRE:
1396 	case CHIP_HAWAII:
1397 	case CHIP_KAVERI:
1398 	case CHIP_KABINI:
1399 	case CHIP_MULLINS:
1400 	case CHIP_TONGA:
1401 	case CHIP_FIJI:
1402 	case CHIP_CARRIZO:
1403 	case CHIP_STONEY:
1404 	case CHIP_POLARIS11:
1405 	case CHIP_POLARIS10:
1406 	case CHIP_POLARIS12:
1407 	case CHIP_VEGAM:
1408 	case CHIP_VEGA10:
1409 	case CHIP_VEGA12:
1410 	case CHIP_VEGA20:
1411 	case CHIP_NAVI10:
1412 	case CHIP_NAVI14:
1413 	case CHIP_RENOIR:
1414 	case CHIP_SIENNA_CICHLID:
1415 	case CHIP_NAVY_FLOUNDER:
1416 	case CHIP_DIMGREY_CAVEFISH:
1417 	case CHIP_BEIGE_GOBY:
1418 	case CHIP_VANGOGH:
1419 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1420 	case CHIP_YELLOW_CARP:
1421 #endif
1422 		return 0;
1423 	case CHIP_NAVI12:
1424 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1425 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1434 	default:
1435 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1436 		return -EINVAL;
1437 	}
1438 
1439 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1440 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1441 		return 0;
1442 	}
1443 
1444 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1445 	if (r == -ENOENT) {
1446 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1447 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1448 		adev->dm.fw_dmcu = NULL;
1449 		return 0;
1450 	}
1451 	if (r) {
1452 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1453 			fw_name_dmcu);
1454 		return r;
1455 	}
1456 
1457 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1458 	if (r) {
1459 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1460 			fw_name_dmcu);
1461 		release_firmware(adev->dm.fw_dmcu);
1462 		adev->dm.fw_dmcu = NULL;
1463 		return r;
1464 	}
1465 
1466 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1467 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1468 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1469 	adev->firmware.fw_size +=
1470 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1471 
1472 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1473 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1474 	adev->firmware.fw_size +=
1475 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1476 
1477 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1478 
1479 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1480 
1481 	return 0;
1482 }
1483 
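/* Register accessors handed to the DMUB service; they route DMUB register
 * traffic through DC's dm_read_reg()/dm_write_reg() helpers.
 */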
1484 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1485 {
1486 	struct amdgpu_device *adev = ctx;
1487 
1488 	return dm_read_reg(adev->dm.dc->ctx, address);
1489 }
1490 
1491 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1492 				     uint32_t value)
1493 {
1494 	struct amdgpu_device *adev = ctx;
1495 
1496 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1497 }
1498 
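/* Software init for the DMUB service: pick the firmware for the ASIC,
 * request and validate it, create the service, size its memory regions and
 * back them with a VRAM allocation.
 */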
1499 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1500 {
1501 	struct dmub_srv_create_params create_params;
1502 	struct dmub_srv_region_params region_params;
1503 	struct dmub_srv_region_info region_info;
1504 	struct dmub_srv_fb_params fb_params;
1505 	struct dmub_srv_fb_info *fb_info;
1506 	struct dmub_srv *dmub_srv;
1507 	const struct dmcub_firmware_header_v1_0 *hdr;
1508 	const char *fw_name_dmub;
1509 	enum dmub_asic dmub_asic;
1510 	enum dmub_status status;
1511 	int r;
1512 
1513 	switch (adev->asic_type) {
1514 	case CHIP_RENOIR:
1515 		dmub_asic = DMUB_ASIC_DCN21;
1516 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1517 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1518 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1519 		break;
1520 	case CHIP_SIENNA_CICHLID:
1521 		dmub_asic = DMUB_ASIC_DCN30;
1522 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1523 		break;
1524 	case CHIP_NAVY_FLOUNDER:
1525 		dmub_asic = DMUB_ASIC_DCN30;
1526 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1527 		break;
1528 	case CHIP_VANGOGH:
1529 		dmub_asic = DMUB_ASIC_DCN301;
1530 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1531 		break;
1532 	case CHIP_DIMGREY_CAVEFISH:
1533 		dmub_asic = DMUB_ASIC_DCN302;
1534 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1535 		break;
1536 	case CHIP_BEIGE_GOBY:
1537 		dmub_asic = DMUB_ASIC_DCN303;
1538 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1539 		break;
1540 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
1541 	case CHIP_YELLOW_CARP:
1542 		dmub_asic = DMUB_ASIC_DCN31;
1543 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1544 		break;
#endif
	default:
1548 		/* ASIC doesn't support DMUB. */
1549 		return 0;
1550 	}
1551 
1552 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1553 	if (r) {
1554 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1555 		return 0;
1556 	}
1557 
1558 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1559 	if (r) {
1560 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1561 		return 0;
1562 	}
1563 
1564 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1565 
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1579 
1580 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1581 	dmub_srv = adev->dm.dmub_srv;
1582 
1583 	if (!dmub_srv) {
1584 		DRM_ERROR("Failed to allocate DMUB service!\n");
1585 		return -ENOMEM;
1586 	}
1587 
1588 	memset(&create_params, 0, sizeof(create_params));
1589 	create_params.user_ctx = adev;
1590 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1591 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1592 	create_params.asic = dmub_asic;
1593 
1594 	/* Create the DMUB service. */
1595 	status = dmub_srv_create(dmub_srv, &create_params);
1596 	if (status != DMUB_STATUS_OK) {
1597 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1598 		return -EINVAL;
1599 	}
1600 
1601 	/* Calculate the size of all the regions for the DMUB service. */
1602 	memset(&region_params, 0, sizeof(region_params));
1603 
1604 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1605 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1606 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1607 	region_params.vbios_size = adev->bios_size;
1608 	region_params.fw_bss_data = region_params.bss_data_size ?
1609 		adev->dm.dmub_fw->data +
1610 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1611 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1612 	region_params.fw_inst_const =
1613 		adev->dm.dmub_fw->data +
1614 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1615 		PSP_HEADER_BYTES;
1616 
1617 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1618 					   &region_info);
1619 
1620 	if (status != DMUB_STATUS_OK) {
1621 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1622 		return -EINVAL;
1623 	}
1624 
1625 	/*
1626 	 * Allocate a framebuffer based on the total size of all the regions.
1627 	 * TODO: Move this into GART.
1628 	 */
1629 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1630 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1631 				    &adev->dm.dmub_bo_gpu_addr,
1632 				    &adev->dm.dmub_bo_cpu_addr);
1633 	if (r)
1634 		return r;
1635 
1636 	/* Rebase the regions on the framebuffer address. */
1637 	memset(&fb_params, 0, sizeof(fb_params));
1638 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1639 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1640 	fb_params.region_info = &region_info;
1641 
1642 	adev->dm.dmub_fb_info =
1643 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1644 	fb_info = adev->dm.dmub_fb_info;
1645 
1646 	if (!fb_info) {
1647 		DRM_ERROR(
1648 			"Failed to allocate framebuffer info for DMUB service!\n");
1649 		return -ENOMEM;
1650 	}
1651 
1652 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1653 	if (status != DMUB_STATUS_OK) {
1654 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1655 		return -EINVAL;
1656 	}
1657 
1658 	return 0;
1659 }
1660 
1661 static int dm_sw_init(void *handle)
1662 {
1663 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1664 	int r;
1665 
1666 	r = dm_dmub_sw_init(adev);
1667 	if (r)
1668 		return r;
1669 
1670 	return load_dmcu_fw(adev);
1671 }
1672 
1673 static int dm_sw_fini(void *handle)
1674 {
1675 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1676 
1677 	kfree(adev->dm.dmub_fb_info);
1678 	adev->dm.dmub_fb_info = NULL;
1679 
1680 	if (adev->dm.dmub_srv) {
1681 		dmub_srv_destroy(adev->dm.dmub_srv);
1682 		adev->dm.dmub_srv = NULL;
1683 	}
1684 
1685 	release_firmware(adev->dm.dmub_fw);
1686 	adev->dm.dmub_fw = NULL;
1687 
1688 	release_firmware(adev->dm.fw_dmcu);
1689 	adev->dm.fw_dmcu = NULL;
1690 
1691 	return 0;
1692 }
1693 
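/* Start topology management on every connector that detected an MST branch
 * device; demote the link to a single connection if that fails.
 */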
1694 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1695 {
1696 	struct amdgpu_dm_connector *aconnector;
1697 	struct drm_connector *connector;
1698 	struct drm_connector_list_iter iter;
1699 	int ret = 0;
1700 
1701 	drm_connector_list_iter_begin(dev, &iter);
1702 	drm_for_each_connector_iter(connector, &iter) {
1703 		aconnector = to_amdgpu_dm_connector(connector);
1704 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1705 		    aconnector->mst_mgr.aux) {
1706 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1707 					 aconnector,
1708 					 aconnector->base.base.id);
1709 
1710 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1711 			if (ret < 0) {
1712 				DRM_ERROR("DM_MST: Failed to start MST\n");
1713 				aconnector->dc_link->type =
1714 					dc_connection_single;
1715 				break;
1716 			}
1717 		}
1718 	}
1719 	drm_connector_list_iter_end(&iter);
1720 
1721 	return ret;
1722 }
1723 
1724 static int dm_late_init(void *handle)
1725 {
1726 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1727 
1728 	struct dmcu_iram_parameters params;
1729 	unsigned int linear_lut[16];
1730 	int i;
1731 	struct dmcu *dmcu = NULL;
1732 
1733 	dmcu = adev->dm.dc->res_pool->dmcu;
1734 
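	/* Build a 16-point linear (identity) backlight transfer LUT spanning
	 * 0..0xFFFF.
	 */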
1735 	for (i = 0; i < 16; i++)
1736 		linear_lut[i] = 0xFFFF * i / 15;
1737 
1738 	params.set = 0;
1739 	params.backlight_ramping_start = 0xCCCC;
1740 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1741 	params.backlight_lut_array_size = 16;
1742 	params.backlight_lut_array = linear_lut;
1743 
	/* Minimum backlight level after ABM reduction; don't allow it to fall
	 * below 1%: 0xFFFF * 0.01 = 0x28F.
	 */
1747 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB, the DMCU object
	 * will be NULL. ABM 2.4 and up are implemented on DMCUB.
	 */
1752 	if (dmcu) {
1753 		if (!dmcu_load_iram(dmcu, params))
1754 			return -EINVAL;
1755 	} else if (adev->dm.dc->ctx->dmub_srv) {
1756 		struct dc_link *edp_links[MAX_NUM_EDP];
1757 		int edp_num;
1758 
1759 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1760 		for (i = 0; i < edp_num; i++) {
1761 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1762 				return -EINVAL;
1763 		}
1764 	}
1765 
1766 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1767 }
1768 
1769 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1770 {
1771 	struct amdgpu_dm_connector *aconnector;
1772 	struct drm_connector *connector;
1773 	struct drm_connector_list_iter iter;
1774 	struct drm_dp_mst_topology_mgr *mgr;
1775 	int ret;
1776 	bool need_hotplug = false;
1777 
1778 	drm_connector_list_iter_begin(dev, &iter);
1779 	drm_for_each_connector_iter(connector, &iter) {
1780 		aconnector = to_amdgpu_dm_connector(connector);
1781 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1782 		    aconnector->mst_port)
1783 			continue;
1784 
1785 		mgr = &aconnector->mst_mgr;
1786 
1787 		if (suspend) {
1788 			drm_dp_mst_topology_mgr_suspend(mgr);
1789 		} else {
1790 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1791 			if (ret < 0) {
1792 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1793 				need_hotplug = true;
1794 			}
1795 		}
1796 	}
1797 	drm_connector_list_iter_end(&iter);
1798 
1799 	if (need_hotplug)
1800 		drm_kms_helper_hotplug_event(dev);
1801 }
1802 
1803 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1804 {
1805 	struct smu_context *smu = &adev->smu;
1806 	int ret = 0;
1807 
1808 	if (!is_support_sw_smu(adev))
1809 		return 0;
1810 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
	 * should be passed to smu during boot-up and on resume from S3.
	 * Boot-up: dc calculates the dcn watermark clock settings within
	 * dc_create and dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1842 	case CHIP_NAVI10:
1843 	case CHIP_NAVI14:
1844 	case CHIP_NAVI12:
1845 		break;
1846 	default:
1847 		return 0;
1848 	}
1849 
1850 	ret = smu_write_watermarks_table(smu);
1851 	if (ret) {
1852 		DRM_ERROR("Failed to update WMTABLE!\n");
1853 		return ret;
1854 	}
1855 
1856 	return 0;
1857 }
1858 
1859 /**
1860  * dm_hw_init() - Initialize DC device
1861  * @handle: The base driver device containing the amdgpu_dm device.
1862  *
1863  * Initialize the &struct amdgpu_display_manager device. This involves calling
1864  * the initializers of each DM component, then populating the struct with them.
1865  *
1866  * Although the function implies hardware initialization, both hardware and
1867  * software are initialized here. Splitting them out to their relevant init
1868  * hooks is a future TODO item.
1869  *
1870  * Some notable things that are initialized here:
1871  *
1872  * - Display Core, both software and hardware
1873  * - DC modules that we need (freesync and color management)
1874  * - DRM software states
1875  * - Interrupt sources and handlers
1876  * - Vblank support
1877  * - Debug FS entries, if enabled
1878  */
1879 static int dm_hw_init(void *handle)
1880 {
1881 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1882 	/* Create DAL display manager */
1883 	amdgpu_dm_init(adev);
1884 	amdgpu_dm_hpd_init(adev);
1885 
1886 	return 0;
1887 }
1888 
1889 /**
1890  * dm_hw_fini() - Teardown DC device
1891  * @handle: The base driver device containing the amdgpu_dm device.
1892  *
1893  * Teardown components within &struct amdgpu_display_manager that require
1894  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1895  * were loaded. Also flush IRQ workqueues and disable them.
1896  */
1897 static int dm_hw_fini(void *handle)
1898 {
1899 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1900 
1901 	amdgpu_dm_hpd_fini(adev);
1902 
1903 	amdgpu_dm_irq_fini(adev);
1904 	amdgpu_dm_fini(adev);
1905 	return 0;
1906 }
1907 
1908 
1909 static int dm_enable_vblank(struct drm_crtc *crtc);
1910 static void dm_disable_vblank(struct drm_crtc *crtc);
1911 
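/*
 * Enable or disable the pflip and vblank interrupts for every stream in
 * @state that has an active plane. Used to quiesce display interrupts
 * around GPU reset.
 */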
1912 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1913 				 struct dc_state *state, bool enable)
1914 {
1915 	enum dc_irq_source irq_source;
1916 	struct amdgpu_crtc *acrtc;
1917 	int rc = -EBUSY;
1918 	int i = 0;
1919 
1920 	for (i = 0; i < state->stream_count; i++) {
1921 		acrtc = get_crtc_by_otg_inst(
1922 				adev, state->stream_status[i].primary_otg_inst);
1923 
1924 		if (acrtc && state->stream_status[i].plane_count != 0) {
1925 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1926 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1929 			if (rc)
1930 				DRM_WARN("Failed to %s pflip interrupts\n",
1931 					 enable ? "enable" : "disable");
1932 
1933 			if (enable) {
1934 				rc = dm_enable_vblank(&acrtc->base);
1935 				if (rc)
1936 					DRM_WARN("Failed to enable vblank interrupts\n");
1937 			} else {
1938 				dm_disable_vblank(&acrtc->base);
1939 			}
1940 
1941 		}
1942 	}
1943 
1944 }
1945 
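/*
 * Commit a copy of the current DC state with all streams and their planes
 * removed, bringing the hardware down to a zero-stream configuration. Used
 * on suspend while a GPU reset is in progress.
 */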
1946 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1947 {
1948 	struct dc_state *context = NULL;
1949 	enum dc_status res = DC_ERROR_UNEXPECTED;
1950 	int i;
1951 	struct dc_stream_state *del_streams[MAX_PIPES];
1952 	int del_streams_count = 0;
1953 
1954 	memset(del_streams, 0, sizeof(del_streams));
1955 
1956 	context = dc_create_state(dc);
1957 	if (context == NULL)
1958 		goto context_alloc_fail;
1959 
1960 	dc_resource_state_copy_construct_current(dc, context);
1961 
1962 	/* First remove from context all streams */
1963 	for (i = 0; i < context->stream_count; i++) {
1964 		struct dc_stream_state *stream = context->streams[i];
1965 
1966 		del_streams[del_streams_count++] = stream;
1967 	}
1968 
1969 	/* Remove all planes for removed streams and then remove the streams */
1970 	for (i = 0; i < del_streams_count; i++) {
1971 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1972 			res = DC_FAIL_DETACH_SURFACES;
1973 			goto fail;
1974 		}
1975 
1976 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1977 		if (res != DC_OK)
1978 			goto fail;
1979 	}
1980 
1981 
1982 	res = dc_validate_global_state(dc, context, false);
1983 
1984 	if (res != DC_OK) {
1985 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1986 		goto fail;
1987 	}
1988 
1989 	res = dc_commit_state(dc, context);
1990 
1991 fail:
1992 	dc_release_state(context);
1993 
1994 context_alloc_fail:
1995 	return res;
1996 }
1997 
1998 static int dm_suspend(void *handle)
1999 {
2000 	struct amdgpu_device *adev = handle;
2001 	struct amdgpu_display_manager *dm = &adev->dm;
2002 	int ret = 0;
2003 
2004 	if (amdgpu_in_reset(adev)) {
2005 		mutex_lock(&dm->dc_lock);
2006 
2007 #if defined(CONFIG_DRM_AMD_DC_DCN)
2008 		dc_allow_idle_optimizations(adev->dm.dc, false);
2009 #endif
2010 
2011 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2012 
2013 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2014 
2015 		amdgpu_dm_commit_zero_streams(dm->dc);
2016 
2017 		amdgpu_dm_irq_suspend(adev);
2018 
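		/*
		 * Note: dm->dc_lock is intentionally left held across the GPU
		 * reset; dm_resume() releases it after recommitting the
		 * cached state.
		 */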
2019 		return ret;
2020 	}
2021 
2022 	WARN_ON(adev->dm.cached_state);
2023 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2024 
2025 	s3_handle_mst(adev_to_drm(adev), true);
2026 
2027 	amdgpu_dm_irq_suspend(adev);
2028 
2029 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2030 
2031 	return 0;
2032 }
2033 
2034 static struct amdgpu_dm_connector *
2035 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2036 					     struct drm_crtc *crtc)
2037 {
2038 	uint32_t i;
2039 	struct drm_connector_state *new_con_state;
2040 	struct drm_connector *connector;
2041 	struct drm_crtc *crtc_from_state;
2042 
2043 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2044 		crtc_from_state = new_con_state->crtc;
2045 
2046 		if (crtc_from_state == crtc)
2047 			return to_amdgpu_dm_connector(connector);
2048 	}
2049 
2050 	return NULL;
2051 }
2052 
2053 static void emulated_link_detect(struct dc_link *link)
2054 {
2055 	struct dc_sink_init_data sink_init_data = { 0 };
2056 	struct display_sink_capability sink_caps = { 0 };
2057 	enum dc_edid_status edid_status;
2058 	struct dc_context *dc_ctx = link->ctx;
2059 	struct dc_sink *sink = NULL;
2060 	struct dc_sink *prev_sink = NULL;
2061 
2062 	link->type = dc_connection_none;
2063 	prev_sink = link->local_sink;
2064 
2065 	if (prev_sink)
2066 		dc_sink_release(prev_sink);
2067 
2068 	switch (link->connector_signal) {
2069 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2070 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2071 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2072 		break;
2073 	}
2074 
2075 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2076 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2077 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2078 		break;
2079 	}
2080 
2081 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2082 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2083 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2084 		break;
2085 	}
2086 
2087 	case SIGNAL_TYPE_LVDS: {
2088 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2089 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2090 		break;
2091 	}
2092 
2093 	case SIGNAL_TYPE_EDP: {
2094 		sink_caps.transaction_type =
2095 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2096 		sink_caps.signal = SIGNAL_TYPE_EDP;
2097 		break;
2098 	}
2099 
2100 	case SIGNAL_TYPE_DISPLAY_PORT: {
2101 		sink_caps.transaction_type =
2102 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2103 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2104 		break;
2105 	}
2106 
2107 	default:
2108 		DC_ERROR("Invalid connector type! signal:%d\n",
2109 			link->connector_signal);
2110 		return;
2111 	}
2112 
2113 	sink_init_data.link = link;
2114 	sink_init_data.sink_signal = sink_caps.signal;
2115 
2116 	sink = dc_sink_create(&sink_init_data);
2117 	if (!sink) {
2118 		DC_ERROR("Failed to create sink!\n");
2119 		return;
2120 	}
2121 
2122 	/* dc_sink_create returns a new reference */
2123 	link->local_sink = sink;
2124 
2125 	edid_status = dm_helpers_read_local_edid(
2126 			link->ctx,
2127 			link,
2128 			sink);
2129 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2134 
2135 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2136 				     struct amdgpu_display_manager *dm)
2137 {
2138 	struct {
2139 		struct dc_surface_update surface_updates[MAX_SURFACES];
2140 		struct dc_plane_info plane_infos[MAX_SURFACES];
2141 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2142 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2143 		struct dc_stream_update stream_update;
	} *bundle;
2145 	int k, m;
2146 
2147 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2148 
2149 	if (!bundle) {
2150 		dm_error("Failed to allocate update bundle\n");
2151 		goto cleanup;
2152 	}
2153 
2154 	for (k = 0; k < dc_state->stream_count; k++) {
2155 		bundle->stream_update.stream = dc_state->streams[k];
2156 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2167 	}
2168 
2169 cleanup:
	kfree(bundle);
2173 }
2174 
2175 static void dm_set_dpms_off(struct dc_link *link)
2176 {
2177 	struct dc_stream_state *stream_state;
2178 	struct amdgpu_dm_connector *aconnector = link->priv;
2179 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2180 	struct dc_stream_update stream_update;
2181 	bool dpms_off = true;
2182 
2183 	memset(&stream_update, 0, sizeof(stream_update));
2184 	stream_update.dpms_off = &dpms_off;
2185 
2186 	mutex_lock(&adev->dm.dc_lock);
2187 	stream_state = dc_stream_find_from_link(link);
2188 
2189 	if (stream_state == NULL) {
2190 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2191 		mutex_unlock(&adev->dm.dc_lock);
2192 		return;
2193 	}
2194 
2195 	stream_update.stream = stream_state;
2196 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2197 				     stream_state, &stream_update,
2198 				     stream_state->ctx->dc->current_state);
2199 	mutex_unlock(&adev->dm.dc_lock);
2200 }
2201 
2202 static int dm_resume(void *handle)
2203 {
2204 	struct amdgpu_device *adev = handle;
2205 	struct drm_device *ddev = adev_to_drm(adev);
2206 	struct amdgpu_display_manager *dm = &adev->dm;
2207 	struct amdgpu_dm_connector *aconnector;
2208 	struct drm_connector *connector;
2209 	struct drm_connector_list_iter iter;
2210 	struct drm_crtc *crtc;
2211 	struct drm_crtc_state *new_crtc_state;
2212 	struct dm_crtc_state *dm_new_crtc_state;
2213 	struct drm_plane *plane;
2214 	struct drm_plane_state *new_plane_state;
2215 	struct dm_plane_state *dm_new_plane_state;
2216 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2217 	enum dc_connection_type new_connection_type = dc_connection_none;
2218 	struct dc_state *dc_state;
2219 	int i, r, j;
2220 
2221 	if (amdgpu_in_reset(adev)) {
2222 		dc_state = dm->cached_dc_state;
2223 
2224 		r = dm_dmub_hw_init(adev);
2225 		if (r)
2226 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2227 
2228 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2229 		dc_resume(dm->dc);
2230 
2231 		amdgpu_dm_irq_resume_early(adev);
2232 
2233 		for (i = 0; i < dc_state->stream_count; i++) {
2234 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
2238 			}
2239 		}
2240 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
2241 		/*
2242 		 * Resource allocation happens for link encoders for newer ASIC in
2243 		 * dc_validate_global_state, so we need to revalidate it.
2244 		 *
2245 		 * This shouldn't fail (it passed once before), so warn if it does.
2246 		 */
2247 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2248 #endif
2249 
2250 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2251 
2252 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2253 
2254 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2255 
2256 		dc_release_state(dm->cached_dc_state);
2257 		dm->cached_dc_state = NULL;
2258 
2259 		amdgpu_dm_irq_resume_late(adev);
2260 
2261 		mutex_unlock(&dm->dc_lock);
2262 
2263 		return 0;
2264 	}
2265 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2266 	dc_release_state(dm_state->context);
2267 	dm_state->context = dc_create_state(dm->dc);
2268 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2269 	dc_resource_state_construct(dm->dc, dm_state->context);
2270 
2271 	/* Before powering on DC we need to re-initialize DMUB. */
2272 	r = dm_dmub_hw_init(adev);
2273 	if (r)
2274 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2275 
2276 	/* power on hardware */
2277 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2278 
2279 	/* program HPD filter */
2280 	dc_resume(dm->dc);
2281 
2282 	/*
2283 	 * early enable HPD Rx IRQ, should be done before set mode as short
2284 	 * pulse interrupts are used for MST
2285 	 */
2286 	amdgpu_dm_irq_resume_early(adev);
2287 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2289 	s3_handle_mst(ddev, false);
2290 
	/* Do detection */
2292 	drm_connector_list_iter_begin(ddev, &iter);
2293 	drm_for_each_connector_iter(connector, &iter) {
2294 		aconnector = to_amdgpu_dm_connector(connector);
2295 
2296 		/*
2297 		 * this is the case when traversing through already created
2298 		 * MST connectors, should be skipped
2299 		 */
2300 		if (aconnector->mst_port)
2301 			continue;
2302 
2303 		mutex_lock(&aconnector->hpd_lock);
2304 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2305 			DRM_ERROR("KMS: Failed to detect connector\n");
2306 
2307 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2308 			emulated_link_detect(aconnector->dc_link);
2309 		else
2310 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2311 
2312 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2313 			aconnector->fake_enable = false;
2314 
2315 		if (aconnector->dc_sink)
2316 			dc_sink_release(aconnector->dc_sink);
2317 		aconnector->dc_sink = NULL;
2318 		amdgpu_dm_update_connector_after_detect(aconnector);
2319 		mutex_unlock(&aconnector->hpd_lock);
2320 	}
2321 	drm_connector_list_iter_end(&iter);
2322 
2323 	/* Force mode set in atomic commit */
2324 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2325 		new_crtc_state->active_changed = true;
2326 
2327 	/*
2328 	 * atomic_check is expected to create the dc states. We need to release
2329 	 * them here, since they were duplicated as part of the suspend
2330 	 * procedure.
2331 	 */
2332 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2333 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2334 		if (dm_new_crtc_state->stream) {
2335 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2336 			dc_stream_release(dm_new_crtc_state->stream);
2337 			dm_new_crtc_state->stream = NULL;
2338 		}
2339 	}
2340 
2341 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2342 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2343 		if (dm_new_plane_state->dc_state) {
2344 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2345 			dc_plane_state_release(dm_new_plane_state->dc_state);
2346 			dm_new_plane_state->dc_state = NULL;
2347 		}
2348 	}
2349 
2350 	drm_atomic_helper_resume(ddev, dm->cached_state);
2351 
2352 	dm->cached_state = NULL;
2353 
2354 	amdgpu_dm_irq_resume_late(adev);
2355 
2356 	amdgpu_dm_smu_write_watermarks_table(adev);
2357 
2358 	return 0;
2359 }
2360 
2361 /**
2362  * DOC: DM Lifecycle
2363  *
2364  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2365  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2366  * the base driver's device list to be initialized and torn down accordingly.
2367  *
2368  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
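 *
 * The base driver typically registers this block during ASIC probe via
 * amdgpu_device_ip_block_add(adev, &dm_ip_block); the hook table and the
 * block descriptor are defined below.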
2369  */
2370 
2371 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2372 	.name = "dm",
2373 	.early_init = dm_early_init,
2374 	.late_init = dm_late_init,
2375 	.sw_init = dm_sw_init,
2376 	.sw_fini = dm_sw_fini,
2377 	.early_fini = amdgpu_dm_early_fini,
2378 	.hw_init = dm_hw_init,
2379 	.hw_fini = dm_hw_fini,
2380 	.suspend = dm_suspend,
2381 	.resume = dm_resume,
2382 	.is_idle = dm_is_idle,
2383 	.wait_for_idle = dm_wait_for_idle,
2384 	.check_soft_reset = dm_check_soft_reset,
2385 	.soft_reset = dm_soft_reset,
2386 	.set_clockgating_state = dm_set_clockgating_state,
2387 	.set_powergating_state = dm_set_powergating_state,
2388 };
2389 
const struct amdgpu_ip_block_version dm_ip_block = {
2392 	.type = AMD_IP_BLOCK_TYPE_DCE,
2393 	.major = 1,
2394 	.minor = 0,
2395 	.rev = 0,
2396 	.funcs = &amdgpu_dm_funcs,
2397 };
2398 
2399 
2400 /**
2401  * DOC: atomic
2402  *
2403  * *WIP*
2404  */
2405 
2406 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2407 	.fb_create = amdgpu_display_user_framebuffer_create,
2408 	.get_format_info = amd_get_format_info,
2409 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2410 	.atomic_check = amdgpu_dm_atomic_check,
2411 	.atomic_commit = drm_atomic_helper_commit,
2412 };
2413 
2414 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2415 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2416 };
2417 
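/*
 * Derive the eDP backlight capabilities from the sink's DPCD extended caps
 * and HDR static metadata: whether AUX backlight control is supported, and
 * the min/max input signal (in nits) for the AUX path.
 */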
2418 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2419 {
2420 	u32 max_cll, min_cll, max, min, q, r;
2421 	struct amdgpu_dm_backlight_caps *caps;
2422 	struct amdgpu_display_manager *dm;
2423 	struct drm_connector *conn_base;
2424 	struct amdgpu_device *adev;
2425 	struct dc_link *link = NULL;
2426 	static const u8 pre_computed_values[] = {
2427 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2428 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2429 
2430 	if (!aconnector || !aconnector->dc_link)
2431 		return;
2432 
2433 	link = aconnector->dc_link;
2434 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2435 		return;
2436 
2437 	conn_base = &aconnector->base;
2438 	adev = drm_to_adev(conn_base->dev);
2439 	dm = &adev->dm;
2440 	caps = &dm->backlight_caps;
2441 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2442 	caps->aux_support = false;
2443 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2444 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2445 
2446 	if (caps->ext_caps->bits.oled == 1 ||
2447 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2448 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2449 		caps->aux_support = true;
2450 
2451 	if (amdgpu_backlight == 0)
2452 		caps->aux_support = false;
2453 	else if (amdgpu_backlight == 1)
2454 		caps->aux_support = true;
2455 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Substituting CV in
	 * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 2**(r/32). They were generated
	 * with the following Ruby one-liner:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results are stored in pre_computed_values above.
	 */
2471 	q = max_cll >> 5;
2472 	r = max_cll % 32;
2473 	max = (1 << q) * pre_computed_values[r];
2474 
2475 	// min luminance: maxLum * (CV/255)^2 / 100
2476 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2477 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
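
	/* A worked example of the approximation above (illustrative values):
	 * for max_cll = 100, q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits,
	 * close to the exact 50*2**(100/32) ~= 436 nits.
	 */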
2478 
2479 	caps->aux_max_input_signal = max;
2480 	caps->aux_min_input_signal = min;
2481 }
2482 
2483 void amdgpu_dm_update_connector_after_detect(
2484 		struct amdgpu_dm_connector *aconnector)
2485 {
2486 	struct drm_connector *connector = &aconnector->base;
2487 	struct drm_device *dev = connector->dev;
2488 	struct dc_sink *sink;
2489 
2490 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2492 		return;
2493 
2494 	sink = aconnector->dc_link->local_sink;
2495 	if (sink)
2496 		dc_sink_retain(sink);
2497 
2498 	/*
2499 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2500 	 * the connector sink is set to either fake or physical sink depends on link status.
2501 	 * Skip if already done during boot.
2502 	 */
2503 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2504 			&& aconnector->dc_em_sink) {
2505 
2506 		/*
2507 		 * For S3 resume with headless use eml_sink to fake stream
2508 		 * because on resume connector->sink is set to NULL
2509 		 */
2510 		mutex_lock(&dev->mode_config.mutex);
2511 
2512 		if (sink) {
2513 			if (aconnector->dc_sink) {
2514 				amdgpu_dm_update_freesync_caps(connector, NULL);
2515 				/*
2516 				 * retain and release below are used to
2517 				 * bump up refcount for sink because the link doesn't point
2518 				 * to it anymore after disconnect, so on next crtc to connector
2519 				 * reshuffle by UMD we will get into unwanted dc_sink release
2520 				 */
2521 				dc_sink_release(aconnector->dc_sink);
2522 			}
2523 			aconnector->dc_sink = sink;
2524 			dc_sink_retain(aconnector->dc_sink);
2525 			amdgpu_dm_update_freesync_caps(connector,
2526 					aconnector->edid);
2527 		} else {
2528 			amdgpu_dm_update_freesync_caps(connector, NULL);
2529 			if (!aconnector->dc_sink) {
2530 				aconnector->dc_sink = aconnector->dc_em_sink;
2531 				dc_sink_retain(aconnector->dc_sink);
2532 			}
2533 		}
2534 
2535 		mutex_unlock(&dev->mode_config.mutex);
2536 
2537 		if (sink)
2538 			dc_sink_release(sink);
2539 		return;
2540 	}
2541 
2542 	/*
2543 	 * TODO: temporary guard to look for proper fix
2544 	 * if this sink is MST sink, we should not do anything
2545 	 */
2546 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2547 		dc_sink_release(sink);
2548 		return;
2549 	}
2550 
2551 	if (aconnector->dc_sink == sink) {
2552 		/*
2553 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2554 		 * Do nothing!!
2555 		 */
2556 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2557 				aconnector->connector_id);
2558 		if (sink)
2559 			dc_sink_release(sink);
2560 		return;
2561 	}
2562 
2563 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2564 		aconnector->connector_id, aconnector->dc_sink, sink);
2565 
2566 	mutex_lock(&dev->mode_config.mutex);
2567 
2568 	/*
2569 	 * 1. Update status of the drm connector
2570 	 * 2. Send an event and let userspace tell us what to do
2571 	 */
2572 	if (sink) {
2573 		/*
2574 		 * TODO: check if we still need the S3 mode update workaround.
2575 		 * If yes, put it here.
2576 		 */
2577 		if (aconnector->dc_sink) {
2578 			amdgpu_dm_update_freesync_caps(connector, NULL);
2579 			dc_sink_release(aconnector->dc_sink);
2580 		}
2581 
2582 		aconnector->dc_sink = sink;
2583 		dc_sink_retain(aconnector->dc_sink);
2584 		if (sink->dc_edid.length == 0) {
2585 			aconnector->edid = NULL;
2586 			if (aconnector->dc_link->aux_mode) {
2587 				drm_dp_cec_unset_edid(
2588 					&aconnector->dm_dp_aux.aux);
2589 			}
2590 		} else {
2591 			aconnector->edid =
2592 				(struct edid *)sink->dc_edid.raw_edid;
2593 
2594 			drm_connector_update_edid_property(connector,
2595 							   aconnector->edid);
2596 			if (aconnector->dc_link->aux_mode)
2597 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2598 						    aconnector->edid);
2599 		}
2600 
2601 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2602 		update_connector_ext_caps(aconnector);
2603 	} else {
2604 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2605 		amdgpu_dm_update_freesync_caps(connector, NULL);
2606 		drm_connector_update_edid_property(connector, NULL);
2607 		aconnector->num_modes = 0;
2608 		dc_sink_release(aconnector->dc_sink);
2609 		aconnector->dc_sink = NULL;
2610 		aconnector->edid = NULL;
2611 #ifdef CONFIG_DRM_AMD_DC_HDCP
2612 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2613 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2614 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2615 #endif
2616 	}
2617 
2618 	mutex_unlock(&dev->mode_config.mutex);
2619 
2620 	update_subconnector_property(aconnector);
2621 
2622 	if (sink)
2623 		dc_sink_release(sink);
2624 }
2625 
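/*
 * HPD (long pulse) interrupt handler: re-run sink detection on the link and,
 * if the connection status changed, update the connector state and send a
 * hotplug event to userspace.
 */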
2626 static void handle_hpd_irq(void *param)
2627 {
2628 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2629 	struct drm_connector *connector = &aconnector->base;
2630 	struct drm_device *dev = connector->dev;
2631 	enum dc_connection_type new_connection_type = dc_connection_none;
2632 	struct amdgpu_device *adev = drm_to_adev(dev);
2633 #ifdef CONFIG_DRM_AMD_DC_HDCP
2634 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2635 #endif
2636 
2637 	if (adev->dm.disable_hpd_irq)
2638 		return;
2639 
2640 	/*
2641 	 * In case of failure or MST no need to update connector status or notify the OS
2642 	 * since (for MST case) MST does this in its own context.
2643 	 */
2644 	mutex_lock(&aconnector->hpd_lock);
2645 
2646 #ifdef CONFIG_DRM_AMD_DC_HDCP
2647 	if (adev->dm.hdcp_workqueue) {
2648 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2649 		dm_con_state->update_hdcp = true;
2650 	}
2651 #endif
2652 	if (aconnector->fake_enable)
2653 		aconnector->fake_enable = false;
2654 
2655 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2656 		DRM_ERROR("KMS: Failed to detect connector\n");
2657 
2658 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
2663 		dm_restore_drm_connector_state(dev, connector);
2664 		drm_modeset_unlock_all(dev);
2665 
2666 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2667 			drm_kms_helper_hotplug_event(dev);
2668 
2669 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2670 		if (new_connection_type == dc_connection_none &&
2671 		    aconnector->dc_link->type == dc_connection_none)
2672 			dm_set_dpms_off(aconnector->dc_link);
2673 
2674 		amdgpu_dm_update_connector_after_detect(aconnector);
2675 
2676 		drm_modeset_lock_all(dev);
2677 		dm_restore_drm_connector_state(dev, connector);
2678 		drm_modeset_unlock_all(dev);
2679 
2680 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2681 			drm_kms_helper_hotplug_event(dev);
2682 	}
	mutex_unlock(&aconnector->hpd_lock);
}
2686 
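/*
 * Service a DP MST short-pulse interrupt: read the ESI bytes from the sink,
 * let the MST topology manager process the event, ACK it back at DPCD, and
 * repeat until no new IRQ is pending (bounded by max_process_count).
 */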
2687 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2688 {
2689 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2690 	uint8_t dret;
2691 	bool new_irq_handled = false;
2692 	int dpcd_addr;
2693 	int dpcd_bytes_to_read;
2694 
2695 	const int max_process_count = 30;
2696 	int process_count = 0;
2697 
2698 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2699 
2700 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2701 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2702 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2703 		dpcd_addr = DP_SINK_COUNT;
2704 	} else {
2705 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2706 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2707 		dpcd_addr = DP_SINK_COUNT_ESI;
2708 	}
2709 
2710 	dret = drm_dp_dpcd_read(
2711 		&aconnector->dm_dp_aux.aux,
2712 		dpcd_addr,
2713 		esi,
2714 		dpcd_bytes_to_read);
2715 
2716 	while (dret == dpcd_bytes_to_read &&
2717 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2720 
2721 		process_count++;
2722 
2723 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2724 		/* handle HPD short pulse irq */
2725 		if (aconnector->mst_mgr.mst_state)
2726 			drm_dp_mst_hpd_irq(
2727 				&aconnector->mst_mgr,
2728 				esi,
2729 				&new_irq_handled);
2730 
2731 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2733 			const int ack_dpcd_bytes_to_write =
2734 				dpcd_bytes_to_read - 1;
2735 
2736 			for (retry = 0; retry < 3; retry++) {
2737 				uint8_t wret;
2738 
2739 				wret = drm_dp_dpcd_write(
2740 					&aconnector->dm_dp_aux.aux,
2741 					dpcd_addr + 1,
2742 					&esi[1],
2743 					ack_dpcd_bytes_to_write);
2744 				if (wret == ack_dpcd_bytes_to_write)
2745 					break;
2746 			}
2747 
2748 			/* check if there is new irq to be handled */
2749 			dret = drm_dp_dpcd_read(
2750 				&aconnector->dm_dp_aux.aux,
2751 				dpcd_addr,
2752 				esi,
2753 				dpcd_bytes_to_read);
2754 
2755 			new_irq_handled = false;
2756 		} else {
2757 			break;
2758 		}
2759 	}
2760 
2761 	if (process_count == max_process_count)
2762 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2763 }
2764 
2765 static void handle_hpd_rx_irq(void *param)
2766 {
2767 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2768 	struct drm_connector *connector = &aconnector->base;
2769 	struct drm_device *dev = connector->dev;
2770 	struct dc_link *dc_link = aconnector->dc_link;
2771 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2772 	bool result = false;
2773 	enum dc_connection_type new_connection_type = dc_connection_none;
2774 	struct amdgpu_device *adev = drm_to_adev(dev);
2775 	union hpd_irq_data hpd_irq_data;
	bool lock_flag = false;
2777 
2778 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2779 
2780 	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt does not have a
	 * GPIO conflict; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
2789 	mutex_lock(&aconnector->hpd_lock);
2790 
2791 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2792 
2793 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2794 		(dc_link->type == dc_connection_mst_branch)) {
2795 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2796 			result = true;
2797 			dm_handle_hpd_rx_irq(aconnector);
2798 			goto out;
2799 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2800 			result = false;
2801 			dm_handle_hpd_rx_irq(aconnector);
2802 			goto out;
2803 		}
2804 	}
2805 
2806 	/*
2807 	 * TODO: We need the lock to avoid touching DC state while it's being
2808 	 * modified during automated compliance testing, or when link loss
2809 	 * happens. While this should be split into subhandlers and proper
2810 	 * interfaces to avoid having to conditionally lock like this in the
2811 	 * outer layer, we need this workaround temporarily to allow MST
2812 	 * lightup in some scenarios to avoid timeout.
2813 	 */
2814 	if (!amdgpu_in_reset(adev) &&
2815 	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
2816 	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
2817 		mutex_lock(&adev->dm.dc_lock);
2818 		lock_flag = 1;
2819 	}
2820 
2821 #ifdef CONFIG_DRM_AMD_DC_HDCP
2822 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2823 #else
2824 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2825 #endif
2826 	if (!amdgpu_in_reset(adev) && lock_flag)
2827 		mutex_unlock(&adev->dm.dc_lock);
2828 
2829 out:
2830 	if (result && !is_mst_root_connector) {
2831 		/* Downstream Port status changed. */
2832 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2833 			DRM_ERROR("KMS: Failed to detect connector\n");
2834 
2835 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2836 			emulated_link_detect(dc_link);
2837 
2838 			if (aconnector->fake_enable)
2839 				aconnector->fake_enable = false;
2840 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2845 			dm_restore_drm_connector_state(dev, connector);
2846 			drm_modeset_unlock_all(dev);
2847 
2848 			drm_kms_helper_hotplug_event(dev);
2849 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2850 
2851 			if (aconnector->fake_enable)
2852 				aconnector->fake_enable = false;
2853 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2858 			dm_restore_drm_connector_state(dev, connector);
2859 			drm_modeset_unlock_all(dev);
2860 
2861 			drm_kms_helper_hotplug_event(dev);
2862 		}
2863 	}
2864 #ifdef CONFIG_DRM_AMD_DC_HDCP
2865 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2866 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2868 	}
2869 #endif
2870 
2871 	if (dc_link->type != dc_connection_mst_branch)
2872 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2873 
2874 	mutex_unlock(&aconnector->hpd_lock);
2875 }
2876 
2877 static void register_hpd_handlers(struct amdgpu_device *adev)
2878 {
2879 	struct drm_device *dev = adev_to_drm(adev);
2880 	struct drm_connector *connector;
2881 	struct amdgpu_dm_connector *aconnector;
2882 	const struct dc_link *dc_link;
2883 	struct dc_interrupt_params int_params = {0};
2884 
2885 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2886 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2887 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2891 		aconnector = to_amdgpu_dm_connector(connector);
2892 		dc_link = aconnector->dc_link;
2893 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2895 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2896 			int_params.irq_source = dc_link->irq_source_hpd;
2897 
2898 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2899 					handle_hpd_irq,
2900 					(void *) aconnector);
2901 		}
2902 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2908 
2909 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2910 					handle_hpd_rx_irq,
2911 					(void *) aconnector);
2912 		}
2913 	}
2914 }
2915 
2916 #if defined(CONFIG_DRM_AMD_DC_SI)
2917 /* Register IRQ sources and initialize IRQ callbacks */
2918 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2919 {
2920 	struct dc *dc = adev->dm.dc;
2921 	struct common_irq_params *c_irq_params;
2922 	struct dc_interrupt_params int_params = {0};
2923 	int r;
2924 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2926 
2927 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2928 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2929 
2930 	/*
2931 	 * Actions of amdgpu_irq_add_id():
2932 	 * 1. Register a set() function with base driver.
2933 	 *    Base driver will call set() function to enable/disable an
2934 	 *    interrupt in DC hardware.
2935 	 * 2. Register amdgpu_dm_irq_handler().
2936 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2937 	 *    coming from DC hardware.
2938 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2940 
2941 	/* Use VBLANK interrupt */
2942 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2944 		if (r) {
2945 			DRM_ERROR("Failed to add crtc irq id!\n");
2946 			return r;
2947 		}
2948 
2949 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2950 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2952 
2953 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2954 
2955 		c_irq_params->adev = adev;
2956 		c_irq_params->irq_src = int_params.irq_source;
2957 
2958 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2959 				dm_crtc_high_irq, c_irq_params);
2960 	}
2961 
2962 	/* Use GRPH_PFLIP interrupt */
2963 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2964 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2965 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2966 		if (r) {
2967 			DRM_ERROR("Failed to add page flip irq id!\n");
2968 			return r;
2969 		}
2970 
2971 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2972 		int_params.irq_source =
2973 			dc_interrupt_to_irq_source(dc, i, 0);
2974 
2975 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2976 
2977 		c_irq_params->adev = adev;
2978 		c_irq_params->irq_src = int_params.irq_source;
2979 
2980 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2984 
2985 	/* HPD */
2986 	r = amdgpu_irq_add_id(adev, client_id,
2987 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2988 	if (r) {
2989 		DRM_ERROR("Failed to add hpd irq id!\n");
2990 		return r;
2991 	}
2992 
2993 	register_hpd_handlers(adev);
2994 
2995 	return 0;
2996 }
2997 #endif
2998 
2999 /* Register IRQ sources and initialize IRQ callbacks */
3000 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3001 {
3002 	struct dc *dc = adev->dm.dc;
3003 	struct common_irq_params *c_irq_params;
3004 	struct dc_interrupt_params int_params = {0};
3005 	int r;
3006 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3008 
3009 	if (adev->asic_type >= CHIP_VEGA10)
3010 		client_id = SOC15_IH_CLIENTID_DCE;
3011 
3012 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3013 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3014 
3015 	/*
3016 	 * Actions of amdgpu_irq_add_id():
3017 	 * 1. Register a set() function with base driver.
3018 	 *    Base driver will call set() function to enable/disable an
3019 	 *    interrupt in DC hardware.
3020 	 * 2. Register amdgpu_dm_irq_handler().
3021 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3022 	 *    coming from DC hardware.
3023 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3025 
3026 	/* Use VBLANK interrupt */
3027 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3028 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3029 		if (r) {
3030 			DRM_ERROR("Failed to add crtc irq id!\n");
3031 			return r;
3032 		}
3033 
3034 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3035 		int_params.irq_source =
3036 			dc_interrupt_to_irq_source(dc, i, 0);
3037 
3038 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3039 
3040 		c_irq_params->adev = adev;
3041 		c_irq_params->irq_src = int_params.irq_source;
3042 
3043 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3044 				dm_crtc_high_irq, c_irq_params);
3045 	}
3046 
3047 	/* Use VUPDATE interrupt */
3048 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3049 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3050 		if (r) {
3051 			DRM_ERROR("Failed to add vupdate irq id!\n");
3052 			return r;
3053 		}
3054 
3055 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3056 		int_params.irq_source =
3057 			dc_interrupt_to_irq_source(dc, i, 0);
3058 
3059 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3060 
3061 		c_irq_params->adev = adev;
3062 		c_irq_params->irq_src = int_params.irq_source;
3063 
3064 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3065 				dm_vupdate_high_irq, c_irq_params);
3066 	}
3067 
3068 	/* Use GRPH_PFLIP interrupt */
3069 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3070 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3071 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3072 		if (r) {
3073 			DRM_ERROR("Failed to add page flip irq id!\n");
3074 			return r;
3075 		}
3076 
3077 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3078 		int_params.irq_source =
3079 			dc_interrupt_to_irq_source(dc, i, 0);
3080 
3081 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3082 
3083 		c_irq_params->adev = adev;
3084 		c_irq_params->irq_src = int_params.irq_source;
3085 
3086 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3090 
3091 	/* HPD */
3092 	r = amdgpu_irq_add_id(adev, client_id,
3093 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3094 	if (r) {
3095 		DRM_ERROR("Failed to add hpd irq id!\n");
3096 		return r;
3097 	}
3098 
3099 	register_hpd_handlers(adev);
3100 
3101 	return 0;
3102 }
3103 
3104 #if defined(CONFIG_DRM_AMD_DC_DCN)
3105 /* Register IRQ sources and initialize IRQ callbacks */
3106 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3107 {
3108 	struct dc *dc = adev->dm.dc;
3109 	struct common_irq_params *c_irq_params;
3110 	struct dc_interrupt_params int_params = {0};
3111 	int r;
3112 	int i;
3113 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3114 	static const unsigned int vrtl_int_srcid[] = {
3115 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3116 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3117 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3118 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3119 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3120 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3121 	};
3122 #endif
3123 
3124 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3125 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3126 
3127 	/*
3128 	 * Actions of amdgpu_irq_add_id():
3129 	 * 1. Register a set() function with base driver.
3130 	 *    Base driver will call set() function to enable/disable an
3131 	 *    interrupt in DC hardware.
3132 	 * 2. Register amdgpu_dm_irq_handler().
3133 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3134 	 *    coming from DC hardware.
3135 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3136 	 *    for acknowledging and handling.
3137 	 */
3138 
3139 	/* Use VSTARTUP interrupt */
3140 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3141 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3142 			i++) {
3143 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3144 
3145 		if (r) {
3146 			DRM_ERROR("Failed to add crtc irq id!\n");
3147 			return r;
3148 		}
3149 
3150 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3151 		int_params.irq_source =
3152 			dc_interrupt_to_irq_source(dc, i, 0);
3153 
3154 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3155 
3156 		c_irq_params->adev = adev;
3157 		c_irq_params->irq_src = int_params.irq_source;
3158 
3159 		amdgpu_dm_irq_register_interrupt(
3160 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3161 	}
3162 
3163 	/* Use otg vertical line interrupt */
3164 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3165 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3166 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3167 				vrtl_int_srcid[i], &adev->vline0_irq);
3168 
3169 		if (r) {
3170 			DRM_ERROR("Failed to add vline0 irq id!\n");
3171 			return r;
3172 		}
3173 
3174 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3175 		int_params.irq_source =
3176 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3177 
3178 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3179 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3180 			break;
3181 		}
3182 
3183 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3184 					- DC_IRQ_SOURCE_DC1_VLINE0];
3185 
3186 		c_irq_params->adev = adev;
3187 		c_irq_params->irq_src = int_params.irq_source;
3188 
3189 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3190 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3191 	}
3192 #endif
3193 
3194 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3195 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3196 	 * to trigger at end of each vblank, regardless of state of the lock,
3197 	 * matching DCE behaviour.
3198 	 */
3199 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3200 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3201 	     i++) {
3202 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3203 
3204 		if (r) {
3205 			DRM_ERROR("Failed to add vupdate irq id!\n");
3206 			return r;
3207 		}
3208 
3209 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3210 		int_params.irq_source =
3211 			dc_interrupt_to_irq_source(dc, i, 0);
3212 
3213 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3214 
3215 		c_irq_params->adev = adev;
3216 		c_irq_params->irq_src = int_params.irq_source;
3217 
3218 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3219 				dm_vupdate_high_irq, c_irq_params);
3220 	}
3221 
3222 	/* Use GRPH_PFLIP interrupt */
3223 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3224 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3225 			i++) {
3226 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3227 		if (r) {
3228 			DRM_ERROR("Failed to add page flip irq id!\n");
3229 			return r;
3230 		}
3231 
3232 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3233 		int_params.irq_source =
3234 			dc_interrupt_to_irq_source(dc, i, 0);
3235 
3236 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3237 
3238 		c_irq_params->adev = adev;
3239 		c_irq_params->irq_src = int_params.irq_source;
3240 
3241 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3245 
3246 	/* HPD */
3247 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3248 			&adev->hpd_irq);
3249 	if (r) {
3250 		DRM_ERROR("Failed to add hpd irq id!\n");
3251 		return r;
3252 	}
3253 
3254 	register_hpd_handlers(adev);
3255 
3256 	return 0;
3257 }
3258 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3259 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3260 {
3261 	struct dc *dc = adev->dm.dc;
3262 	struct common_irq_params *c_irq_params;
3263 	struct dc_interrupt_params int_params = {0};
3264 	int r, i;
3265 
3266 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3267 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3268 
3269 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3270 			&adev->dmub_outbox_irq);
3271 	if (r) {
3272 		DRM_ERROR("Failed to add outbox irq id!\n");
3273 		return r;
3274 	}
3275 
3276 	if (dc->ctx->dmub_srv) {
3277 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3278 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);
3281 
3282 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3283 
3284 		c_irq_params->adev = adev;
3285 		c_irq_params->irq_src = int_params.irq_source;
3286 
3287 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3288 				dm_dmub_outbox1_low_irq, c_irq_params);
3289 	}
3290 
3291 	return 0;
3292 }
3293 #endif
3294 
3295 /*
3296  * Acquires the lock for the atomic state object and returns
3297  * the new atomic state.
3298  *
3299  * This should only be called during atomic check.
3300  */
3301 static int dm_atomic_get_state(struct drm_atomic_state *state,
3302 			       struct dm_atomic_state **dm_state)
3303 {
3304 	struct drm_device *dev = state->dev;
3305 	struct amdgpu_device *adev = drm_to_adev(dev);
3306 	struct amdgpu_display_manager *dm = &adev->dm;
3307 	struct drm_private_state *priv_state;
3308 
3309 	if (*dm_state)
3310 		return 0;
3311 
3312 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3313 	if (IS_ERR(priv_state))
3314 		return PTR_ERR(priv_state);
3315 
3316 	*dm_state = to_dm_atomic_state(priv_state);
3317 
3318 	return 0;
3319 }
3320 
3321 static struct dm_atomic_state *
3322 dm_atomic_get_new_state(struct drm_atomic_state *state)
3323 {
3324 	struct drm_device *dev = state->dev;
3325 	struct amdgpu_device *adev = drm_to_adev(dev);
3326 	struct amdgpu_display_manager *dm = &adev->dm;
3327 	struct drm_private_obj *obj;
3328 	struct drm_private_state *new_obj_state;
3329 	int i;
3330 
3331 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3332 		if (obj->funcs == dm->atomic_obj.funcs)
3333 			return to_dm_atomic_state(new_obj_state);
3334 	}
3335 
3336 	return NULL;
3337 }
3338 
3339 static struct drm_private_state *
3340 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3341 {
3342 	struct dm_atomic_state *old_state, *new_state;
3343 
3344 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3345 	if (!new_state)
3346 		return NULL;
3347 
3348 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3349 
3350 	old_state = to_dm_atomic_state(obj->state);
3351 
3352 	if (old_state && old_state->context)
3353 		new_state->context = dc_copy_state(old_state->context);
3354 
3355 	if (!new_state->context) {
3356 		kfree(new_state);
3357 		return NULL;
3358 	}
3359 
3360 	return &new_state->base;
3361 }
3362 
3363 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3364 				    struct drm_private_state *state)
3365 {
3366 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3367 
3368 	if (dm_state && dm_state->context)
3369 		dc_release_state(dm_state->context);
3370 
3371 	kfree(dm_state);
3372 }
3373 
static const struct drm_private_state_funcs dm_atomic_state_funcs = {
3375 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3376 	.atomic_destroy_state = dm_atomic_destroy_state,
3377 };
3378 
3379 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3380 {
3381 	struct dm_atomic_state *state;
3382 	int r;
3383 
3384 	adev->mode_info.mode_config_initialized = true;
3385 
3386 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3387 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3388 
3389 	adev_to_drm(adev)->mode_config.max_width = 16384;
3390 	adev_to_drm(adev)->mode_config.max_height = 16384;
3391 
3392 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3393 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3394 	/* indicates support for immediate flip */
3395 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3396 
3397 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3398 
3399 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3400 	if (!state)
3401 		return -ENOMEM;
3402 
3403 	state->context = dc_create_state(adev->dm.dc);
3404 	if (!state->context) {
3405 		kfree(state);
3406 		return -ENOMEM;
3407 	}
3408 
3409 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3410 
3411 	drm_atomic_private_obj_init(adev_to_drm(adev),
3412 				    &adev->dm.atomic_obj,
3413 				    &state->base,
3414 				    &dm_atomic_state_funcs);
3415 
3416 	r = amdgpu_display_modeset_create_props(adev);
3417 	if (r) {
3418 		dc_release_state(state->context);
3419 		kfree(state);
3420 		return r;
3421 	}
3422 
3423 	r = amdgpu_dm_audio_init(adev);
3424 	if (r) {
3425 		dc_release_state(state->context);
3426 		kfree(state);
3427 		return r;
3428 	}
3429 
3430 	return 0;
3431 }
3432 
3433 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3434 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3435 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3436 
3437 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3438 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3439 
3440 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3441 {
3442 #if defined(CONFIG_ACPI)
3443 	struct amdgpu_dm_backlight_caps caps;
3444 
3445 	memset(&caps, 0, sizeof(caps));
3446 
3447 	if (dm->backlight_caps.caps_valid)
3448 		return;
3449 
3450 	amdgpu_acpi_get_backlight_caps(&caps);
3451 	if (caps.caps_valid) {
3452 		dm->backlight_caps.caps_valid = true;
3453 		if (caps.aux_support)
3454 			return;
3455 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3456 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3457 	} else {
3458 		dm->backlight_caps.min_input_signal =
3459 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3460 		dm->backlight_caps.max_input_signal =
3461 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3462 	}
3463 #else
3464 	if (dm->backlight_caps.aux_support)
3465 		return;
3466 
3467 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3468 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3469 #endif
3470 }
3471 
3472 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3473 				unsigned *min, unsigned *max)
3474 {
3475 	if (!caps)
3476 		return 0;
3477 
3478 	if (caps->aux_support) {
3479 		// Firmware limits are in nits, DC API wants millinits.
3480 		*max = 1000 * caps->aux_max_input_signal;
3481 		*min = 1000 * caps->aux_min_input_signal;
3482 	} else {
3483 		// Firmware limits are 8-bit, PWM control is 16-bit.
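		// 0x101 (257) replicates the byte (0xAB -> 0xABAB), mapping
		// 0..255 onto 0..65535 exactly.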
3484 		*max = 0x101 * caps->max_input_signal;
3485 		*min = 0x101 * caps->min_input_signal;
3486 	}
3487 	return 1;
3488 }
3489 
3490 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3491 					uint32_t brightness)
3492 {
3493 	unsigned min, max;
3494 
3495 	if (!get_brightness_range(caps, &min, &max))
3496 		return brightness;
3497 
3498 	// Rescale 0..255 to min..max
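	// e.g. PWM caps 12..255 give min = 3084, max = 65535, so user
	// brightness 128 maps to 3084 + round(62451 * 128 / 255) = 34432.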
3499 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3500 				       AMDGPU_MAX_BL_LEVEL);
3501 }
3502 
3503 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3504 				      uint32_t brightness)
3505 {
3506 	unsigned min, max;
3507 
3508 	if (!get_brightness_range(caps, &min, &max))
3509 		return brightness;
3510 
3511 	if (brightness < min)
3512 		return 0;
3513 	// Rescale min..max to 0..255
3514 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3515 				 max - min);
3516 }
3517 
3518 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3519 					 u32 user_brightness)
3520 {
3521 	struct amdgpu_dm_backlight_caps caps;
3522 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3523 	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
3524 	bool rc = true; /* stays true if there are no eDP links to program */
3525 	int i;
3526 
3527 	amdgpu_dm_update_backlight_caps(dm);
3528 	caps = dm->backlight_caps;
3529 
3530 	for (i = 0; i < dm->num_of_edps; i++) {
3531 		dm->brightness[i] = user_brightness;
3532 		brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
3533 		link[i] = (struct dc_link *)dm->backlight_link[i];
3534 	}
3535 
3536 	/* Change brightness based on AUX property */
3537 	if (caps.aux_support) {
3538 		for (i = 0; i < dm->num_of_edps; i++) {
3539 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
3540 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3541 			if (!rc) {
3542 				DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3543 				break;
3544 			}
3545 		}
3546 	} else {
3547 		for (i = 0; i < dm->num_of_edps; i++) {
3548 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
3549 			if (!rc) {
3550 				DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
3551 				break;
3552 			}
3553 		}
3554 	}
3555 
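	/* rc reflects the last link programmed; any failure breaks the loops above. */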
3556 	return rc ? 0 : 1;
3557 }
3558 
3559 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3560 {
3561 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3562 
3563 	amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
3564 
3565 	return 0;
3566 }
3567 
3568 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
3569 {
3570 	struct amdgpu_dm_backlight_caps caps;
3571 
3572 	amdgpu_dm_update_backlight_caps(dm);
3573 	caps = dm->backlight_caps;
3574 
3575 	if (caps.aux_support) {
3576 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3577 		u32 avg, peak;
3578 		bool rc;
3579 
3580 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3581 		if (!rc)
3582 			return dm->brightness[0];
3583 		return convert_brightness_to_user(&caps, avg);
3584 	} else {
3585 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3586 
3587 		if (ret == DC_ERROR_UNEXPECTED)
3588 			return dm->brightness[0];
3589 		return convert_brightness_to_user(&caps, ret);
3590 	}
3591 }
3592 
3593 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3594 {
3595 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3596 
3597 	return amdgpu_dm_backlight_get_level(dm);
3598 }
3599 
3600 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3601 	.options = BL_CORE_SUSPENDRESUME,
3602 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3603 	.update_status	= amdgpu_dm_backlight_update_status,
3604 };
3605 
3606 static void
3607 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3608 {
3609 	char bl_name[16];
3610 	struct backlight_properties props = { 0 };
3611 	int i;
3612 
3613 	amdgpu_dm_update_backlight_caps(dm);
3614 	for (i = 0; i < dm->num_of_edps; i++)
3615 		dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
3616 
3617 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3618 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3619 	props.type = BACKLIGHT_RAW;
3620 
3621 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3622 		 adev_to_drm(dm->adev)->primary->index);
3623 
3624 	dm->backlight_dev = backlight_device_register(bl_name,
3625 						      adev_to_drm(dm->adev)->dev,
3626 						      dm,
3627 						      &amdgpu_dm_backlight_ops,
3628 						      &props);
3629 
3630 	if (IS_ERR(dm->backlight_dev))
3631 		DRM_ERROR("DM: Backlight registration failed!\n");
3632 	else
3633 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3634 }
3635 
3636 #endif
3637 
3638 static int initialize_plane(struct amdgpu_display_manager *dm,
3639 			    struct amdgpu_mode_info *mode_info, int plane_id,
3640 			    enum drm_plane_type plane_type,
3641 			    const struct dc_plane_cap *plane_cap)
3642 {
3643 	struct drm_plane *plane;
3644 	unsigned long possible_crtcs;
3645 	int ret = 0;
3646 
3647 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3648 	if (!plane) {
3649 		DRM_ERROR("KMS: Failed to allocate plane\n");
3650 		return -ENOMEM;
3651 	}
3652 	plane->type = plane_type;
3653 
3654 	/*
3655 	 * HACK: IGT tests expect that the primary plane for a CRTC
3656 	 * can only have one possible CRTC. Only expose support for
3657 	 * any CRTC if the plane is not going to be used as a primary
3658 	 * plane for a CRTC - like overlay or underlay planes.
3659 	 */
3660 	possible_crtcs = 1 << plane_id;
3661 	if (plane_id >= dm->dc->caps.max_streams)
3662 		possible_crtcs = 0xff;
3663 
3664 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3665 
3666 	if (ret) {
3667 		DRM_ERROR("KMS: Failed to initialize plane\n");
3668 		kfree(plane);
3669 		return ret;
3670 	}
3671 
3672 	if (mode_info)
3673 		mode_info->planes[plane_id] = plane;
3674 
3675 	return ret;
3676 }
3677 
3678 
3679 static void register_backlight_device(struct amdgpu_display_manager *dm,
3680 				      struct dc_link *link)
3681 {
3682 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3683 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3684 
3685 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3686 	    link->type != dc_connection_none) {
3687 		/*
3688 		 * Even if registration failed, we should continue with
3689 		 * DM initialization because not having a backlight control
3690 		 * is better than a black screen.
3691 		 */
3692 		if (!dm->backlight_dev)
3693 			amdgpu_dm_register_backlight_device(dm);
3694 
3695 		if (dm->backlight_dev) {
3696 			dm->backlight_link[dm->num_of_edps] = link;
3697 			dm->num_of_edps++;
3698 		}
3699 	}
3700 #endif
3701 }
3702 
3703 
3704 /*
3705  * In this architecture, the association
3706  * connector -> encoder -> crtc
3707  * is not really required. The crtc and connector will hold the
3708  * display_index as an abstraction to use with the DAL component.
3709  *
3710  * Returns 0 on success
3711  */
3712 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3713 {
3714 	struct amdgpu_display_manager *dm = &adev->dm;
3715 	int32_t i;
3716 	struct amdgpu_dm_connector *aconnector = NULL;
3717 	struct amdgpu_encoder *aencoder = NULL;
3718 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3719 	uint32_t link_cnt;
3720 	int32_t primary_planes;
3721 	enum dc_connection_type new_connection_type = dc_connection_none;
3722 	const struct dc_plane_cap *plane;
3723 
3724 	dm->display_indexes_num = dm->dc->caps.max_streams;
3725 	/* Update the actual number of CRTCs in use */
3726 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3727 
3728 	link_cnt = dm->dc->caps.max_links;
3729 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3730 		DRM_ERROR("DM: Failed to initialize mode config\n");
3731 		return -EINVAL;
3732 	}
3733 
3734 	/* There is one primary plane per CRTC */
3735 	primary_planes = dm->dc->caps.max_streams;
3736 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3737 
3738 	/*
3739 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3740 	 * Order is reversed to match iteration order in atomic check.
3741 	 */
3742 	for (i = (primary_planes - 1); i >= 0; i--) {
3743 		plane = &dm->dc->caps.planes[i];
3744 
3745 		if (initialize_plane(dm, mode_info, i,
3746 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3747 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3748 			goto fail;
3749 		}
3750 	}
3751 
3752 	/*
3753 	 * Initialize overlay planes, index starting after primary planes.
3754 	 * These planes have a higher DRM index than the primary planes since
3755 	 * they should be considered as having a higher z-order.
3756 	 * Order is reversed to match iteration order in atomic check.
3757 	 *
3758 	 * Only support DCN for now, and only expose one so we don't encourage
3759 	 * userspace to use up all the pipes.
3760 	 */
3761 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3762 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3763 
3764 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3765 			continue;
3766 
3767 		if (!plane->blends_with_above || !plane->blends_with_below)
3768 			continue;
3769 
3770 		if (!plane->pixel_format_support.argb8888)
3771 			continue;
3772 
3773 		if (initialize_plane(dm, NULL, primary_planes + i,
3774 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3775 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3776 			goto fail;
3777 		}
3778 
3779 		/* Only create one overlay plane. */
3780 		break;
3781 	}
3782 
3783 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3784 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3785 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3786 			goto fail;
3787 		}
3788 
3789 #if defined(CONFIG_DRM_AMD_DC_DCN)
3790 	/* Use Outbox interrupt */
3791 	switch (adev->asic_type) {
3792 	case CHIP_SIENNA_CICHLID:
3793 	case CHIP_NAVY_FLOUNDER:
3794 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3795 	case CHIP_YELLOW_CARP:
3796 #endif
3797 	case CHIP_RENOIR:
3798 		if (register_outbox_irq_handlers(dm->adev)) {
3799 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3800 			goto fail;
3801 		}
3802 		break;
3803 	default:
3804 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
3805 	}
3806 #endif
3807 
3808 	/* loops over all connectors on the board */
3809 	for (i = 0; i < link_cnt; i++) {
3810 		struct dc_link *link = NULL;
3811 
3812 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3813 			DRM_ERROR(
3814 				"KMS: Cannot support more than %d display indexes\n",
3815 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3816 			continue;
3817 		}
3818 
3819 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3820 		if (!aconnector)
3821 			goto fail;
3822 
3823 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3824 		if (!aencoder)
3825 			goto fail;
3826 
3827 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3828 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3829 			goto fail;
3830 		}
3831 
3832 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3833 			DRM_ERROR("KMS: Failed to initialize connector\n");
3834 			goto fail;
3835 		}
3836 
3837 		link = dc_get_link_at_index(dm->dc, i);
3838 
3839 		if (!dc_link_detect_sink(link, &new_connection_type))
3840 			DRM_ERROR("KMS: Failed to detect connector\n");
3841 
3842 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3843 			emulated_link_detect(link);
3844 			amdgpu_dm_update_connector_after_detect(aconnector);
3845 
3846 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3847 			amdgpu_dm_update_connector_after_detect(aconnector);
3848 			register_backlight_device(dm, link);
3849 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3850 				amdgpu_dm_set_psr_caps(link);
3851 		}
3852 
3854 	}
3855 
3856 	/* Software is initialized. Now we can register interrupt handlers. */
3857 	switch (adev->asic_type) {
3858 #if defined(CONFIG_DRM_AMD_DC_SI)
3859 	case CHIP_TAHITI:
3860 	case CHIP_PITCAIRN:
3861 	case CHIP_VERDE:
3862 	case CHIP_OLAND:
3863 		if (dce60_register_irq_handlers(dm->adev)) {
3864 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3865 			goto fail;
3866 		}
3867 		break;
3868 #endif
3869 	case CHIP_BONAIRE:
3870 	case CHIP_HAWAII:
3871 	case CHIP_KAVERI:
3872 	case CHIP_KABINI:
3873 	case CHIP_MULLINS:
3874 	case CHIP_TONGA:
3875 	case CHIP_FIJI:
3876 	case CHIP_CARRIZO:
3877 	case CHIP_STONEY:
3878 	case CHIP_POLARIS11:
3879 	case CHIP_POLARIS10:
3880 	case CHIP_POLARIS12:
3881 	case CHIP_VEGAM:
3882 	case CHIP_VEGA10:
3883 	case CHIP_VEGA12:
3884 	case CHIP_VEGA20:
3885 		if (dce110_register_irq_handlers(dm->adev)) {
3886 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3887 			goto fail;
3888 		}
3889 		break;
3890 #if defined(CONFIG_DRM_AMD_DC_DCN)
3891 	case CHIP_RAVEN:
3892 	case CHIP_NAVI12:
3893 	case CHIP_NAVI10:
3894 	case CHIP_NAVI14:
3895 	case CHIP_RENOIR:
3896 	case CHIP_SIENNA_CICHLID:
3897 	case CHIP_NAVY_FLOUNDER:
3898 	case CHIP_DIMGREY_CAVEFISH:
3899 	case CHIP_BEIGE_GOBY:
3900 	case CHIP_VANGOGH:
3901 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
3902 	case CHIP_YELLOW_CARP:
3903 #endif
3904 		if (dcn10_register_irq_handlers(dm->adev)) {
3905 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3906 			goto fail;
3907 		}
3908 		break;
3909 #endif
3910 	default:
3911 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3912 		goto fail;
3913 	}
3914 
3915 	return 0;
3916 fail:
3917 	kfree(aencoder);
3918 	kfree(aconnector);
3919 
3920 	return -EINVAL;
3921 }
3922 
3923 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3924 {
3925 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3927 }
3928 
3929 /******************************************************************************
3930  * amdgpu_display_funcs functions
3931  *****************************************************************************/
3932 
3933 /*
3934  * dm_bandwidth_update - program display watermarks
3935  *
3936  * @adev: amdgpu_device pointer
3937  *
3938  * Calculate and program the display watermarks and line buffer allocation.
3939  */
3940 static void dm_bandwidth_update(struct amdgpu_device *adev)
3941 {
3942 	/* TODO: implement later */
3943 }
3944 
3945 static const struct amdgpu_display_funcs dm_display_funcs = {
3946 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3947 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3948 	.backlight_set_level = NULL, /* never called for DC */
3949 	.backlight_get_level = NULL, /* never called for DC */
3950 	.hpd_sense = NULL,/* called unconditionally */
3951 	.hpd_set_polarity = NULL, /* called unconditionally */
3952 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3953 	.page_flip_get_scanoutpos =
3954 		dm_crtc_get_scanoutpos,/* called unconditionally */
3955 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3956 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3957 };
3958 
3959 #if defined(CONFIG_DEBUG_KERNEL_DC)
3960 
3961 static ssize_t s3_debug_store(struct device *device,
3962 			      struct device_attribute *attr,
3963 			      const char *buf,
3964 			      size_t count)
3965 {
3966 	int ret;
3967 	int s3_state;
3968 	struct drm_device *drm_dev = dev_get_drvdata(device);
3969 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3970 
3971 	ret = kstrtoint(buf, 0, &s3_state);
3972 
3973 	if (ret == 0) {
3974 		if (s3_state) {
3975 			dm_resume(adev);
3976 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3977 		} else
3978 			dm_suspend(adev);
3979 	}
3980 
3981 	return ret == 0 ? count : 0;
3982 }
3983 
3984 DEVICE_ATTR_WO(s3_debug);
3985 
3986 #endif
3987 
3988 static int dm_early_init(void *handle)
3989 {
3990 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3991 
3992 	switch (adev->asic_type) {
3993 #if defined(CONFIG_DRM_AMD_DC_SI)
3994 	case CHIP_TAHITI:
3995 	case CHIP_PITCAIRN:
3996 	case CHIP_VERDE:
3997 		adev->mode_info.num_crtc = 6;
3998 		adev->mode_info.num_hpd = 6;
3999 		adev->mode_info.num_dig = 6;
4000 		break;
4001 	case CHIP_OLAND:
4002 		adev->mode_info.num_crtc = 2;
4003 		adev->mode_info.num_hpd = 2;
4004 		adev->mode_info.num_dig = 2;
4005 		break;
4006 #endif
4007 	case CHIP_BONAIRE:
4008 	case CHIP_HAWAII:
4009 		adev->mode_info.num_crtc = 6;
4010 		adev->mode_info.num_hpd = 6;
4011 		adev->mode_info.num_dig = 6;
4012 		break;
4013 	case CHIP_KAVERI:
4014 		adev->mode_info.num_crtc = 4;
4015 		adev->mode_info.num_hpd = 6;
4016 		adev->mode_info.num_dig = 7;
4017 		break;
4018 	case CHIP_KABINI:
4019 	case CHIP_MULLINS:
4020 		adev->mode_info.num_crtc = 2;
4021 		adev->mode_info.num_hpd = 6;
4022 		adev->mode_info.num_dig = 6;
4023 		break;
4024 	case CHIP_FIJI:
4025 	case CHIP_TONGA:
4026 		adev->mode_info.num_crtc = 6;
4027 		adev->mode_info.num_hpd = 6;
4028 		adev->mode_info.num_dig = 7;
4029 		break;
4030 	case CHIP_CARRIZO:
4031 		adev->mode_info.num_crtc = 3;
4032 		adev->mode_info.num_hpd = 6;
4033 		adev->mode_info.num_dig = 9;
4034 		break;
4035 	case CHIP_STONEY:
4036 		adev->mode_info.num_crtc = 2;
4037 		adev->mode_info.num_hpd = 6;
4038 		adev->mode_info.num_dig = 9;
4039 		break;
4040 	case CHIP_POLARIS11:
4041 	case CHIP_POLARIS12:
4042 		adev->mode_info.num_crtc = 5;
4043 		adev->mode_info.num_hpd = 5;
4044 		adev->mode_info.num_dig = 5;
4045 		break;
4046 	case CHIP_POLARIS10:
4047 	case CHIP_VEGAM:
4048 		adev->mode_info.num_crtc = 6;
4049 		adev->mode_info.num_hpd = 6;
4050 		adev->mode_info.num_dig = 6;
4051 		break;
4052 	case CHIP_VEGA10:
4053 	case CHIP_VEGA12:
4054 	case CHIP_VEGA20:
4055 		adev->mode_info.num_crtc = 6;
4056 		adev->mode_info.num_hpd = 6;
4057 		adev->mode_info.num_dig = 6;
4058 		break;
4059 #if defined(CONFIG_DRM_AMD_DC_DCN)
4060 	case CHIP_RAVEN:
4061 	case CHIP_RENOIR:
4062 	case CHIP_VANGOGH:
4063 		adev->mode_info.num_crtc = 4;
4064 		adev->mode_info.num_hpd = 4;
4065 		adev->mode_info.num_dig = 4;
4066 		break;
4067 	case CHIP_NAVI10:
4068 	case CHIP_NAVI12:
4069 	case CHIP_SIENNA_CICHLID:
4070 	case CHIP_NAVY_FLOUNDER:
4071 		adev->mode_info.num_crtc = 6;
4072 		adev->mode_info.num_hpd = 6;
4073 		adev->mode_info.num_dig = 6;
4074 		break;
4075 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4076 	case CHIP_YELLOW_CARP:
4077 		adev->mode_info.num_crtc = 4;
4078 		adev->mode_info.num_hpd = 4;
4079 		adev->mode_info.num_dig = 4;
4080 		break;
4081 #endif
4082 	case CHIP_NAVI14:
4083 	case CHIP_DIMGREY_CAVEFISH:
4084 		adev->mode_info.num_crtc = 5;
4085 		adev->mode_info.num_hpd = 5;
4086 		adev->mode_info.num_dig = 5;
4087 		break;
4088 	case CHIP_BEIGE_GOBY:
4089 		adev->mode_info.num_crtc = 2;
4090 		adev->mode_info.num_hpd = 2;
4091 		adev->mode_info.num_dig = 2;
4092 		break;
4093 #endif
4094 	default:
4095 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4096 		return -EINVAL;
4097 	}
4098 
4099 	amdgpu_dm_set_irq_funcs(adev);
4100 
4101 	if (adev->mode_info.funcs == NULL)
4102 		adev->mode_info.funcs = &dm_display_funcs;
4103 
4104 	/*
4105 	 * Note: Do NOT change adev->audio_endpt_rreg and
4106 	 * adev->audio_endpt_wreg because they are initialised in
4107 	 * amdgpu_device_init()
4108 	 */
4109 #if defined(CONFIG_DEBUG_KERNEL_DC)
4110 	device_create_file(
4111 		adev_to_drm(adev)->dev,
4112 		&dev_attr_s3_debug);
4113 #endif
4114 
4115 	return 0;
4116 }
4117 
4118 static bool modeset_required(struct drm_crtc_state *crtc_state,
4119 			     struct dc_stream_state *new_stream,
4120 			     struct dc_stream_state *old_stream)
4121 {
4122 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4123 }
4124 
4125 static bool modereset_required(struct drm_crtc_state *crtc_state)
4126 {
4127 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4128 }
4129 
4130 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4131 {
4132 	drm_encoder_cleanup(encoder);
4133 	kfree(encoder);
4134 }
4135 
4136 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4137 	.destroy = amdgpu_dm_encoder_destroy,
4138 };
4139 
4140 
4141 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4142 					 struct drm_framebuffer *fb,
4143 					 int *min_downscale, int *max_upscale)
4144 {
4145 	struct amdgpu_device *adev = drm_to_adev(dev);
4146 	struct dc *dc = adev->dm.dc;
4147 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4148 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4149 
4150 	switch (fb->format->format) {
4151 	case DRM_FORMAT_P010:
4152 	case DRM_FORMAT_NV12:
4153 	case DRM_FORMAT_NV21:
4154 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4155 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4156 		break;
4157 
4158 	case DRM_FORMAT_XRGB16161616F:
4159 	case DRM_FORMAT_ARGB16161616F:
4160 	case DRM_FORMAT_XBGR16161616F:
4161 	case DRM_FORMAT_ABGR16161616F:
4162 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4163 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4164 		break;
4165 
4166 	default:
4167 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4168 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4169 		break;
4170 	}
4171 
4172 	/*
4173 	 * A factor of 1 in the plane_cap means scaling is not allowed,
4174 	 * i.e. only a scaling factor of 1.0 == 1000 units is accepted.
4175 	 */
4176 	if (*max_upscale == 1)
4177 		*max_upscale = 1000;
4178 
4179 	if (*min_downscale == 1)
4180 		*min_downscale = 1000;
4181 }
4182 
4183 
4184 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4185 				struct dc_scaling_info *scaling_info)
4186 {
4187 	int scale_w, scale_h, min_downscale, max_upscale;
4188 
4189 	memset(scaling_info, 0, sizeof(*scaling_info));
4190 
4191 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4192 	scaling_info->src_rect.x = state->src_x >> 16;
4193 	scaling_info->src_rect.y = state->src_y >> 16;
4194 
4195 	/*
4196 	 * For reasons we don't (yet) fully understand, a non-zero
4197 	 * src_y coordinate into an NV12 buffer can cause a
4198 	 * system hang. To avoid hangs (and maybe be overly cautious)
4199 	 * let's reject both non-zero src_x and src_y.
4200 	 *
4201 	 * We currently know of only one use-case to reproduce a
4202 	 * scenario with non-zero src_x and src_y for NV12, which
4203 	 * is to gesture the YouTube Android app into full screen
4204 	 * on ChromeOS.
4205 	 */
4206 	if (state->fb &&
4207 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4208 	    (scaling_info->src_rect.x != 0 ||
4209 	     scaling_info->src_rect.y != 0))
4210 		return -EINVAL;
4211 
4212 	scaling_info->src_rect.width = state->src_w >> 16;
4213 	if (scaling_info->src_rect.width == 0)
4214 		return -EINVAL;
4215 
4216 	scaling_info->src_rect.height = state->src_h >> 16;
4217 	if (scaling_info->src_rect.height == 0)
4218 		return -EINVAL;
4219 
4220 	scaling_info->dst_rect.x = state->crtc_x;
4221 	scaling_info->dst_rect.y = state->crtc_y;
4222 
4223 	if (state->crtc_w == 0)
4224 		return -EINVAL;
4225 
4226 	scaling_info->dst_rect.width = state->crtc_w;
4227 
4228 	if (state->crtc_h == 0)
4229 		return -EINVAL;
4230 
4231 	scaling_info->dst_rect.height = state->crtc_h;
4232 
4233 	/* DRM doesn't specify clipping on destination output. */
4234 	scaling_info->clip_rect = scaling_info->dst_rect;
4235 
4236 	/* Validate scaling per-format with DC plane caps */
4237 	if (state->plane && state->plane->dev && state->fb) {
4238 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4239 					     &min_downscale, &max_upscale);
4240 	} else {
4241 		min_downscale = 250;
4242 		max_upscale = 16000;
4243 	}
4244 
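	/* Scaling factors are in units of 0.001, matching the caps above (1000 == 1.0). */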
4245 	scale_w = scaling_info->dst_rect.width * 1000 /
4246 		  scaling_info->src_rect.width;
4247 
4248 	if (scale_w < min_downscale || scale_w > max_upscale)
4249 		return -EINVAL;
4250 
4251 	scale_h = scaling_info->dst_rect.height * 1000 /
4252 		  scaling_info->src_rect.height;
4253 
4254 	if (scale_h < min_downscale || scale_h > max_upscale)
4255 		return -EINVAL;
4256 
4257 	/*
4258 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4259 	 * assume reasonable defaults based on the format.
4260 	 */
4261 
4262 	return 0;
4263 }
4264 
4265 static void
4266 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4267 				 uint64_t tiling_flags)
4268 {
4269 	/* Fill GFX8 params */
4270 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4271 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4272 
4273 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4274 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4275 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4276 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4277 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4278 
4279 		/* XXX fix me for VI */
4280 		tiling_info->gfx8.num_banks = num_banks;
4281 		tiling_info->gfx8.array_mode =
4282 				DC_ARRAY_2D_TILED_THIN1;
4283 		tiling_info->gfx8.tile_split = tile_split;
4284 		tiling_info->gfx8.bank_width = bankw;
4285 		tiling_info->gfx8.bank_height = bankh;
4286 		tiling_info->gfx8.tile_aspect = mtaspect;
4287 		tiling_info->gfx8.tile_mode =
4288 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4289 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4290 			== DC_ARRAY_1D_TILED_THIN1) {
4291 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4292 	}
4293 
4294 	tiling_info->gfx8.pipe_config =
4295 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4296 }
4297 
4298 static void
4299 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4300 				  union dc_tiling_info *tiling_info)
4301 {
4302 	tiling_info->gfx9.num_pipes =
4303 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4304 	tiling_info->gfx9.num_banks =
4305 		adev->gfx.config.gb_addr_config_fields.num_banks;
4306 	tiling_info->gfx9.pipe_interleave =
4307 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4308 	tiling_info->gfx9.num_shader_engines =
4309 		adev->gfx.config.gb_addr_config_fields.num_se;
4310 	tiling_info->gfx9.max_compressed_frags =
4311 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4312 	tiling_info->gfx9.num_rb_per_se =
4313 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4314 	tiling_info->gfx9.shaderEnable = 1;
4315 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4316 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4317 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4318 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4319 #if defined(CONFIG_DRM_AMD_DC_DCN3_1)
4320 	    adev->asic_type == CHIP_YELLOW_CARP ||
4321 #endif
4322 	    adev->asic_type == CHIP_VANGOGH)
4323 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4324 }
4325 
4326 static int
4327 validate_dcc(struct amdgpu_device *adev,
4328 	     const enum surface_pixel_format format,
4329 	     const enum dc_rotation_angle rotation,
4330 	     const union dc_tiling_info *tiling_info,
4331 	     const struct dc_plane_dcc_param *dcc,
4332 	     const struct dc_plane_address *address,
4333 	     const struct plane_size *plane_size)
4334 {
4335 	struct dc *dc = adev->dm.dc;
4336 	struct dc_dcc_surface_param input;
4337 	struct dc_surface_dcc_cap output;
4338 
4339 	memset(&input, 0, sizeof(input));
4340 	memset(&output, 0, sizeof(output));
4341 
4342 	if (!dcc->enable)
4343 		return 0;
4344 
4345 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4346 	    !dc->cap_funcs.get_dcc_compression_cap)
4347 		return -EINVAL;
4348 
4349 	input.format = format;
4350 	input.surface_size.width = plane_size->surface_size.width;
4351 	input.surface_size.height = plane_size->surface_size.height;
4352 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4353 
4354 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4355 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4356 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4357 		input.scan = SCAN_DIRECTION_VERTICAL;
4358 
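	/* Ask DC whether this surface/swizzle combination can be DCC compressed. */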
4359 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4360 		return -EINVAL;
4361 
4362 	if (!output.capable)
4363 		return -EINVAL;
4364 
4365 	if (dcc->independent_64b_blks == 0 &&
4366 	    output.grph.rgb.independent_64b_blks != 0)
4367 		return -EINVAL;
4368 
4369 	return 0;
4370 }
4371 
4372 static bool
4373 modifier_has_dcc(uint64_t modifier)
4374 {
4375 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4376 }
4377 
4378 static unsigned
4379 modifier_gfx9_swizzle_mode(uint64_t modifier)
4380 {
4381 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4382 		return 0;
4383 
4384 	return AMD_FMT_MOD_GET(TILE, modifier);
4385 }
4386 
4387 static const struct drm_format_info *
4388 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4389 {
4390 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4391 }
4392 
4393 static void
4394 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4395 				    union dc_tiling_info *tiling_info,
4396 				    uint64_t modifier)
4397 {
4398 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4399 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4400 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4401 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4402 
4403 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4404 
4405 	if (!IS_AMD_FMT_MOD(modifier))
4406 		return;
4407 
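	/* Override the device defaults with the layout encoded in the modifier. */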
4408 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4409 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4410 
4411 	if (adev->family >= AMDGPU_FAMILY_NV) {
4412 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4413 	} else {
4414 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4415 
4416 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4417 	}
4418 }
4419 
4420 enum dm_micro_swizzle {
4421 	MICRO_SWIZZLE_Z = 0,
4422 	MICRO_SWIZZLE_S = 1,
4423 	MICRO_SWIZZLE_D = 2,
4424 	MICRO_SWIZZLE_R = 3
4425 };
4426 
4427 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4428 					  uint32_t format,
4429 					  uint64_t modifier)
4430 {
4431 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4432 	const struct drm_format_info *info = drm_format_info(format);
4433 	int i;
4434 
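	/* The low two bits of a GFX9+ swizzle mode select the micro-tile kind (Z/S/D/R). */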
4435 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4436 
4437 	if (!info)
4438 		return false;
4439 
4440 	/*
4441 	 * We always have to allow these modifiers:
4442 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4443 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4444 	 */
4445 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4446 	    modifier == DRM_FORMAT_MOD_INVALID) {
4447 		return true;
4448 	}
4449 
4450 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4451 	for (i = 0; i < plane->modifier_count; i++) {
4452 		if (modifier == plane->modifiers[i])
4453 			break;
4454 	}
4455 	if (i == plane->modifier_count)
4456 		return false;
4457 
4458 	/*
4459 	 * For D swizzle the canonical modifier depends on the bpp, so check
4460 	 * it here.
4461 	 */
4462 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4463 	    adev->family >= AMDGPU_FAMILY_NV) {
4464 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4465 			return false;
4466 	}
4467 
4468 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4469 	    info->cpp[0] < 8)
4470 		return false;
4471 
4472 	if (modifier_has_dcc(modifier)) {
4473 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4474 		if (info->cpp[0] != 4)
4475 			return false;
4476 		/* We support multi-planar formats, but not when combined with
4477 		 * additional DCC metadata planes. */
4478 		if (info->num_planes > 1)
4479 			return false;
4480 	}
4481 
4482 	return true;
4483 }
4484 
4485 static void
4486 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4487 {
4488 	if (!*mods)
4489 		return;
4490 
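	/*
	 * Grow the array geometrically when full; on allocation failure the
	 * list is freed and NULLed so callers report -ENOMEM.
	 */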
4491 	if (*cap - *size < 1) {
4492 		uint64_t new_cap = *cap * 2;
4493 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4494 
4495 		if (!new_mods) {
4496 			kfree(*mods);
4497 			*mods = NULL;
4498 			return;
4499 		}
4500 
4501 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4502 		kfree(*mods);
4503 		*mods = new_mods;
4504 		*cap = new_cap;
4505 	}
4506 
4507 	(*mods)[*size] = mod;
4508 	*size += 1;
4509 }
4510 
4511 static void
4512 add_gfx9_modifiers(const struct amdgpu_device *adev,
4513 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4514 {
4515 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4516 	int pipe_xor_bits = min(8, pipes +
4517 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4518 	int bank_xor_bits = min(8 - pipe_xor_bits,
4519 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4520 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4521 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4522 
4523 
4524 	if (adev->family == AMDGPU_FAMILY_RV) {
4525 		/* Raven2 and later */
4526 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4527 
4528 		/*
4529 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4530 		 * doesn't support _D on DCN
4531 		 */
4532 
4533 		if (has_constant_encode) {
4534 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4535 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4536 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4537 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4538 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4539 				    AMD_FMT_MOD_SET(DCC, 1) |
4540 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4541 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4542 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4543 		}
4544 
4545 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4546 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4547 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4548 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4549 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4550 			    AMD_FMT_MOD_SET(DCC, 1) |
4551 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4552 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4553 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4554 
4555 		if (has_constant_encode) {
4556 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4557 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4558 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4559 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4560 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4561 				    AMD_FMT_MOD_SET(DCC, 1) |
4562 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4563 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4564 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4565 
4566 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4567 				    AMD_FMT_MOD_SET(RB, rb) |
4568 				    AMD_FMT_MOD_SET(PIPE, pipes));
4569 		}
4570 
4571 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4572 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4573 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4574 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4575 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4576 			    AMD_FMT_MOD_SET(DCC, 1) |
4577 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4578 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4579 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4580 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4581 			    AMD_FMT_MOD_SET(RB, rb) |
4582 			    AMD_FMT_MOD_SET(PIPE, pipes));
4583 	}
4584 
4585 	/*
4586 	 * Only supported for 64bpp on Raven, will be filtered on format in
4587 	 * dm_plane_format_mod_supported.
4588 	 */
4589 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4590 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4591 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4592 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4593 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4594 
4595 	if (adev->family == AMDGPU_FAMILY_RV) {
4596 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4597 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4598 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4599 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4600 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4601 	}
4602 
4603 	/*
4604 	 * Only supported for 64bpp on Raven, will be filtered on format in
4605 	 * dm_plane_format_mod_supported.
4606 	 */
4607 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4608 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4609 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4610 
4611 	if (adev->family == AMDGPU_FAMILY_RV) {
4612 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4613 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4614 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4615 	}
4616 }
4617 
4618 static void
4619 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4620 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4621 {
4622 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4623 
4624 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4625 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4626 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4627 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4628 		    AMD_FMT_MOD_SET(DCC, 1) |
4629 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4630 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4631 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4632 
4633 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4634 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4635 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4636 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4637 		    AMD_FMT_MOD_SET(DCC, 1) |
4638 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4639 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4640 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4641 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4642 
4643 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4644 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4645 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4646 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4647 
4648 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4649 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4650 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4651 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4652 
4653 
4654 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4655 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4656 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4657 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4658 
4659 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4660 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4661 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4662 }
4663 
4664 static void
4665 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4666 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4667 {
4668 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4669 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4670 
4671 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4672 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4673 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4674 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4675 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4676 		    AMD_FMT_MOD_SET(DCC, 1) |
4677 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4678 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4679 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4680 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4681 
4682 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4683 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4684 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4685 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4686 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4687 		    AMD_FMT_MOD_SET(DCC, 1) |
4688 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4689 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4690 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4691 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4692 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4693 
4694 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4695 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4696 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4697 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4698 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4699 
4700 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4701 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4702 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4703 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4704 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4705 
4706 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4707 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4708 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4709 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4710 
4711 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4712 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4713 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4714 }
4715 
4716 static int
4717 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4718 {
4719 	uint64_t size = 0, capacity = 128;
4720 	*mods = NULL;
4721 
4722 	/* We have not hooked up any pre-GFX9 modifiers. */
4723 	if (adev->family < AMDGPU_FAMILY_AI)
4724 		return 0;
4725 
4726 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4727 
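	/* Cursors only take linear surfaces; INVALID terminates the list. */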
4728 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4729 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4730 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4731 		return *mods ? 0 : -ENOMEM;
4732 	}
4733 
4734 	switch (adev->family) {
4735 	case AMDGPU_FAMILY_AI:
4736 	case AMDGPU_FAMILY_RV:
4737 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4738 		break;
4739 	case AMDGPU_FAMILY_NV:
4740 	case AMDGPU_FAMILY_VGH:
4741 	case AMDGPU_FAMILY_YC:
4742 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4743 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4744 		else
4745 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4746 		break;
4747 	}
4748 
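	/* Linear is always supported. */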
4749 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4750 
4751 	/* INVALID marks the end of the list. */
4752 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4753 
4754 	if (!*mods)
4755 		return -ENOMEM;
4756 
4757 	return 0;
4758 }
4759 
4760 static int
4761 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4762 					  const struct amdgpu_framebuffer *afb,
4763 					  const enum surface_pixel_format format,
4764 					  const enum dc_rotation_angle rotation,
4765 					  const struct plane_size *plane_size,
4766 					  union dc_tiling_info *tiling_info,
4767 					  struct dc_plane_dcc_param *dcc,
4768 					  struct dc_plane_address *address,
4769 					  const bool force_disable_dcc)
4770 {
4771 	const uint64_t modifier = afb->base.modifier;
4772 	int ret;
4773 
4774 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4775 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4776 
4777 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4778 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4779 
4780 		dcc->enable = 1;
4781 		dcc->meta_pitch = afb->base.pitches[1];
4782 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4783 
4784 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4785 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4786 	}
4787 
4788 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4789 	if (ret)
4790 		return ret;
4791 
4792 	return 0;
4793 }
4794 
4795 static int
4796 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4797 			     const struct amdgpu_framebuffer *afb,
4798 			     const enum surface_pixel_format format,
4799 			     const enum dc_rotation_angle rotation,
4800 			     const uint64_t tiling_flags,
4801 			     union dc_tiling_info *tiling_info,
4802 			     struct plane_size *plane_size,
4803 			     struct dc_plane_dcc_param *dcc,
4804 			     struct dc_plane_address *address,
4805 			     bool tmz_surface,
4806 			     bool force_disable_dcc)
4807 {
4808 	const struct drm_framebuffer *fb = &afb->base;
4809 	int ret;
4810 
4811 	memset(tiling_info, 0, sizeof(*tiling_info));
4812 	memset(plane_size, 0, sizeof(*plane_size));
4813 	memset(dcc, 0, sizeof(*dcc));
4814 	memset(address, 0, sizeof(*address));
4815 
4816 	address->tmz_surface = tmz_surface;
4817 
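	/*
	 * RGB surfaces are single-plane; video formats carry separate luma and
	 * chroma planes with their own pitches and addresses.
	 */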
4818 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4819 		uint64_t addr = afb->address + fb->offsets[0];
4820 
4821 		plane_size->surface_size.x = 0;
4822 		plane_size->surface_size.y = 0;
4823 		plane_size->surface_size.width = fb->width;
4824 		plane_size->surface_size.height = fb->height;
4825 		plane_size->surface_pitch =
4826 			fb->pitches[0] / fb->format->cpp[0];
4827 
4828 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4829 		address->grph.addr.low_part = lower_32_bits(addr);
4830 		address->grph.addr.high_part = upper_32_bits(addr);
4831 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4832 		uint64_t luma_addr = afb->address + fb->offsets[0];
4833 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4834 
4835 		plane_size->surface_size.x = 0;
4836 		plane_size->surface_size.y = 0;
4837 		plane_size->surface_size.width = fb->width;
4838 		plane_size->surface_size.height = fb->height;
4839 		plane_size->surface_pitch =
4840 			fb->pitches[0] / fb->format->cpp[0];
4841 
4842 		plane_size->chroma_size.x = 0;
4843 		plane_size->chroma_size.y = 0;
4844 		/* TODO: set these based on surface format */
4845 		plane_size->chroma_size.width = fb->width / 2;
4846 		plane_size->chroma_size.height = fb->height / 2;
4847 
4848 		plane_size->chroma_pitch =
4849 			fb->pitches[1] / fb->format->cpp[1];
4850 
4851 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4852 		address->video_progressive.luma_addr.low_part =
4853 			lower_32_bits(luma_addr);
4854 		address->video_progressive.luma_addr.high_part =
4855 			upper_32_bits(luma_addr);
4856 		address->video_progressive.chroma_addr.low_part =
4857 			lower_32_bits(chroma_addr);
4858 		address->video_progressive.chroma_addr.high_part =
4859 			upper_32_bits(chroma_addr);
4860 	}
4861 
4862 	if (adev->family >= AMDGPU_FAMILY_AI) {
4863 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4864 								rotation, plane_size,
4865 								tiling_info, dcc,
4866 								address,
4867 								force_disable_dcc);
4868 		if (ret)
4869 			return ret;
4870 	} else {
4871 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4872 	}
4873 
4874 	return 0;
4875 }
4876 
4877 static void
4878 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4879 			       bool *per_pixel_alpha, bool *global_alpha,
4880 			       int *global_alpha_value)
4881 {
4882 	*per_pixel_alpha = false;
4883 	*global_alpha = false;
4884 	*global_alpha_value = 0xff;
4885 
4886 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4887 		return;
4888 
4889 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4890 		static const uint32_t alpha_formats[] = {
4891 			DRM_FORMAT_ARGB8888,
4892 			DRM_FORMAT_RGBA8888,
4893 			DRM_FORMAT_ABGR8888,
4894 		};
4895 		uint32_t format = plane_state->fb->format->format;
4896 		unsigned int i;
4897 
4898 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4899 			if (format == alpha_formats[i]) {
4900 				*per_pixel_alpha = true;
4901 				break;
4902 			}
4903 		}
4904 	}
4905 
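	/* DRM plane alpha is 16-bit; DC takes an 8-bit value, so keep the high byte. */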
4906 	if (plane_state->alpha < 0xffff) {
4907 		*global_alpha = true;
4908 		*global_alpha_value = plane_state->alpha >> 8;
4909 	}
4910 }
4911 
4912 static int
4913 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4914 			    const enum surface_pixel_format format,
4915 			    enum dc_color_space *color_space)
4916 {
4917 	bool full_range;
4918 
4919 	*color_space = COLOR_SPACE_SRGB;
4920 
4921 	/* DRM color properties only affect non-RGB formats. */
4922 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4923 		return 0;
4924 
4925 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4926 
4927 	switch (plane_state->color_encoding) {
4928 	case DRM_COLOR_YCBCR_BT601:
4929 		if (full_range)
4930 			*color_space = COLOR_SPACE_YCBCR601;
4931 		else
4932 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4933 		break;
4934 
4935 	case DRM_COLOR_YCBCR_BT709:
4936 		if (full_range)
4937 			*color_space = COLOR_SPACE_YCBCR709;
4938 		else
4939 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4940 		break;
4941 
4942 	case DRM_COLOR_YCBCR_BT2020:
4943 		if (full_range)
4944 			*color_space = COLOR_SPACE_2020_YCBCR;
4945 		else
4946 			return -EINVAL;
4947 		break;
4948 
4949 	default:
4950 		return -EINVAL;
4951 	}
4952 
4953 	return 0;
4954 }
4955 
4956 static int
4957 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4958 			    const struct drm_plane_state *plane_state,
4959 			    const uint64_t tiling_flags,
4960 			    struct dc_plane_info *plane_info,
4961 			    struct dc_plane_address *address,
4962 			    bool tmz_surface,
4963 			    bool force_disable_dcc)
4964 {
4965 	const struct drm_framebuffer *fb = plane_state->fb;
4966 	const struct amdgpu_framebuffer *afb =
4967 		to_amdgpu_framebuffer(plane_state->fb);
4968 	int ret;
4969 
4970 	memset(plane_info, 0, sizeof(*plane_info));
4971 
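	/* Translate the DRM fourcc into DC's surface pixel format. */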
4972 	switch (fb->format->format) {
4973 	case DRM_FORMAT_C8:
4974 		plane_info->format =
4975 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4976 		break;
4977 	case DRM_FORMAT_RGB565:
4978 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4979 		break;
4980 	case DRM_FORMAT_XRGB8888:
4981 	case DRM_FORMAT_ARGB8888:
4982 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4983 		break;
4984 	case DRM_FORMAT_XRGB2101010:
4985 	case DRM_FORMAT_ARGB2101010:
4986 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4987 		break;
4988 	case DRM_FORMAT_XBGR2101010:
4989 	case DRM_FORMAT_ABGR2101010:
4990 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4991 		break;
4992 	case DRM_FORMAT_XBGR8888:
4993 	case DRM_FORMAT_ABGR8888:
4994 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4995 		break;
4996 	case DRM_FORMAT_NV21:
4997 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4998 		break;
4999 	case DRM_FORMAT_NV12:
5000 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5001 		break;
5002 	case DRM_FORMAT_P010:
5003 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5004 		break;
5005 	case DRM_FORMAT_XRGB16161616F:
5006 	case DRM_FORMAT_ARGB16161616F:
5007 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5008 		break;
5009 	case DRM_FORMAT_XBGR16161616F:
5010 	case DRM_FORMAT_ABGR16161616F:
5011 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5012 		break;
5013 	case DRM_FORMAT_XRGB16161616:
5014 	case DRM_FORMAT_ARGB16161616:
5015 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5016 		break;
5017 	case DRM_FORMAT_XBGR16161616:
5018 	case DRM_FORMAT_ABGR16161616:
5019 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5020 		break;
5021 	default:
5022 		DRM_ERROR(
5023 			"Unsupported screen format %p4cc\n",
5024 			&fb->format->format);
5025 		return -EINVAL;
5026 	}
5027 
5028 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5029 	case DRM_MODE_ROTATE_0:
5030 		plane_info->rotation = ROTATION_ANGLE_0;
5031 		break;
5032 	case DRM_MODE_ROTATE_90:
5033 		plane_info->rotation = ROTATION_ANGLE_90;
5034 		break;
5035 	case DRM_MODE_ROTATE_180:
5036 		plane_info->rotation = ROTATION_ANGLE_180;
5037 		break;
5038 	case DRM_MODE_ROTATE_270:
5039 		plane_info->rotation = ROTATION_ANGLE_270;
5040 		break;
5041 	default:
5042 		plane_info->rotation = ROTATION_ANGLE_0;
5043 		break;
5044 	}
5045 
5046 	plane_info->visible = true;
5047 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5048 
5049 	plane_info->layer_index = 0;
5050 
5051 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5052 					  &plane_info->color_space);
5053 	if (ret)
5054 		return ret;
5055 
5056 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5057 					   plane_info->rotation, tiling_flags,
5058 					   &plane_info->tiling_info,
5059 					   &plane_info->plane_size,
5060 					   &plane_info->dcc, address, tmz_surface,
5061 					   force_disable_dcc);
5062 	if (ret)
5063 		return ret;
5064 
5065 	fill_blending_from_plane_state(
5066 		plane_state, &plane_info->per_pixel_alpha,
5067 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5068 
5069 	return 0;
5070 }
5071 
5072 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5073 				    struct dc_plane_state *dc_plane_state,
5074 				    struct drm_plane_state *plane_state,
5075 				    struct drm_crtc_state *crtc_state)
5076 {
5077 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5078 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5079 	struct dc_scaling_info scaling_info;
5080 	struct dc_plane_info plane_info;
5081 	int ret;
5082 	bool force_disable_dcc = false;
5083 
5084 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5085 	if (ret)
5086 		return ret;
5087 
5088 	dc_plane_state->src_rect = scaling_info.src_rect;
5089 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5090 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5091 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5092 
5093 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5094 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5095 					  afb->tiling_flags,
5096 					  &plane_info,
5097 					  &dc_plane_state->address,
5098 					  afb->tmz_surface,
5099 					  force_disable_dcc);
5100 	if (ret)
5101 		return ret;
5102 
5103 	dc_plane_state->format = plane_info.format;
5104 	dc_plane_state->color_space = plane_info.color_space;
5106 	dc_plane_state->plane_size = plane_info.plane_size;
5107 	dc_plane_state->rotation = plane_info.rotation;
5108 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5109 	dc_plane_state->stereo_format = plane_info.stereo_format;
5110 	dc_plane_state->tiling_info = plane_info.tiling_info;
5111 	dc_plane_state->visible = plane_info.visible;
5112 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5113 	dc_plane_state->global_alpha = plane_info.global_alpha;
5114 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5115 	dc_plane_state->dcc = plane_info.dcc;
5116 	dc_plane_state->layer_index = plane_info.layer_index; /* fill_dc_plane_info_and_addr() always sets this to 0 */
5117 	dc_plane_state->flip_int_enabled = true;
5118 
5119 	/*
5120 	 * Always set input transfer function, since plane state is refreshed
5121 	 * every time.
5122 	 */
5123 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5124 	if (ret)
5125 		return ret;
5126 
5127 	return 0;
5128 }
5129 
5130 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5131 					   const struct dm_connector_state *dm_state,
5132 					   struct dc_stream_state *stream)
5133 {
5134 	enum amdgpu_rmx_type rmx_type;
5135 
5136 	struct rect src = { 0 }; /* viewport in composition space */
5137 	struct rect dst = { 0 }; /* stream addressable area */
5138 
5139 	/* no mode. nothing to be done */
5140 	if (!mode)
5141 		return;
5142 
5143 	/* Full screen scaling by default */
5144 	src.width = mode->hdisplay;
5145 	src.height = mode->vdisplay;
5146 	dst.width = stream->timing.h_addressable;
5147 	dst.height = stream->timing.v_addressable;
5148 
5149 	if (dm_state) {
5150 		rmx_type = dm_state->scaling;
5151 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5152 			if (src.width * dst.height <
5153 					src.height * dst.width) {
5154 				/* height needs less upscaling/more downscaling */
5155 				dst.width = src.width *
5156 						dst.height / src.height;
5157 			} else {
5158 				/* width needs less upscaling/more downscaling */
5159 				dst.height = src.height *
5160 						dst.width / src.width;
5161 			}
5162 		} else if (rmx_type == RMX_CENTER) {
5163 			dst = src;
5164 		}
5165 
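		/*
		 * Worked example (illustrative numbers): a 1920x1080 source on
		 * a 1600x1200 panel takes the else branch above (1920 * 1200 >
		 * 1080 * 1600), giving dst.height = 1080 * 1600 / 1920 = 900;
		 * the centering below then letterboxes a 1600x900 rectangle.
		 */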
5166 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5167 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5168 
5169 		if (dm_state->underscan_enable) {
5170 			dst.x += dm_state->underscan_hborder / 2;
5171 			dst.y += dm_state->underscan_vborder / 2;
5172 			dst.width -= dm_state->underscan_hborder;
5173 			dst.height -= dm_state->underscan_vborder;
5174 		}
5175 	}
5176 
5177 	stream->src = src;
5178 	stream->dst = dst;
5179 
5180 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5181 		      dst.x, dst.y, dst.width, dst.height);
5182 
5183 }
5184 
5185 static enum dc_color_depth
5186 convert_color_depth_from_display_info(const struct drm_connector *connector,
5187 				      bool is_y420, int requested_bpc)
5188 {
5189 	uint8_t bpc;
5190 
5191 	if (is_y420) {
5192 		bpc = 8;
5193 
5194 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5195 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5196 			bpc = 16;
5197 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5198 			bpc = 12;
5199 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5200 			bpc = 10;
5201 	} else {
5202 		bpc = (uint8_t)connector->display_info.bpc;
5203 		/* Assume 8 bpc by default if no bpc is specified. */
5204 		bpc = bpc ? bpc : 8;
5205 	}
5206 
5207 	if (requested_bpc > 0) {
5208 		/*
5209 		 * Cap display bpc based on the user requested value.
5210 		 *
5211 		 * The value for state->max_bpc may not be correctly updated
5212 		 * depending on when the connector gets added to the state
5213 		 * or if this was called outside of atomic check, so it
5214 		 * can't be used directly.
5215 		 */
5216 		bpc = min_t(u8, bpc, requested_bpc);
5217 
5218 		/* Round down to the nearest even number. */
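		/* e.g. a capped value of 11 lands on 10, matching the even depths handled below */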
5219 		bpc = bpc - (bpc & 1);
5220 	}
5221 
5222 	switch (bpc) {
5223 	case 0:
5224 		/*
5225 		 * Temporary workaround: DRM doesn't parse color depth for
5226 		 * EDID revisions before 1.4.
5227 		 * TODO: Fix EDID parsing.
5228 		 */
5229 		return COLOR_DEPTH_888;
5230 	case 6:
5231 		return COLOR_DEPTH_666;
5232 	case 8:
5233 		return COLOR_DEPTH_888;
5234 	case 10:
5235 		return COLOR_DEPTH_101010;
5236 	case 12:
5237 		return COLOR_DEPTH_121212;
5238 	case 14:
5239 		return COLOR_DEPTH_141414;
5240 	case 16:
5241 		return COLOR_DEPTH_161616;
5242 	default:
5243 		return COLOR_DEPTH_UNDEFINED;
5244 	}
5245 }
5246 
5247 static enum dc_aspect_ratio
5248 get_aspect_ratio(const struct drm_display_mode *mode_in)
5249 {
5250 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5251 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5252 }
5253 
5254 static enum dc_color_space
5255 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5256 {
5257 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5258 
5259 	switch (dc_crtc_timing->pixel_encoding)	{
5260 	case PIXEL_ENCODING_YCBCR422:
5261 	case PIXEL_ENCODING_YCBCR444:
5262 	case PIXEL_ENCODING_YCBCR420:
5263 	{
5264 		/*
5265 		 * 27030 kHz is the separation point between HDTV and SDTV
5266 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5267 		 * respectively.
5268 		 */
5269 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5270 			if (dc_crtc_timing->flags.Y_ONLY)
5271 				color_space =
5272 					COLOR_SPACE_YCBCR709_LIMITED;
5273 			else
5274 				color_space = COLOR_SPACE_YCBCR709;
5275 		} else {
5276 			if (dc_crtc_timing->flags.Y_ONLY)
5277 				color_space =
5278 					COLOR_SPACE_YCBCR601_LIMITED;
5279 			else
5280 				color_space = COLOR_SPACE_YCBCR601;
5281 		}
5282 
5283 	}
5284 	break;
5285 	case PIXEL_ENCODING_RGB:
5286 		color_space = COLOR_SPACE_SRGB;
5287 		break;
5288 
5289 	default:
5290 		WARN_ON(1);
5291 		break;
5292 	}
5293 
5294 	return color_space;
5295 }
5296 
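/*
 * Illustrative walk-through of the loop below (normalized_clk and
 * max_tmds_clock are both in kHz): a 594000 kHz mode at COLOR_DEPTH_121212
 * normalizes to 594000 * 36 / 24 = 891000 kHz. Against a 600000 kHz TMDS
 * limit that fails, as does COLOR_DEPTH_101010 (742500 kHz), so the depth
 * steps down until COLOR_DEPTH_888 (594000 kHz) fits and is kept.
 */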
5297 static bool adjust_colour_depth_from_display_info(
5298 	struct dc_crtc_timing *timing_out,
5299 	const struct drm_display_info *info)
5300 {
5301 	enum dc_color_depth depth = timing_out->display_color_depth;
5302 	int normalized_clk;
5303 	do {
5304 		normalized_clk = timing_out->pix_clk_100hz / 10;
5305 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5306 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5307 			normalized_clk /= 2;
5308 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5309 		switch (depth) {
5310 		case COLOR_DEPTH_888:
5311 			break;
5312 		case COLOR_DEPTH_101010:
5313 			normalized_clk = (normalized_clk * 30) / 24;
5314 			break;
5315 		case COLOR_DEPTH_121212:
5316 			normalized_clk = (normalized_clk * 36) / 24;
5317 			break;
5318 		case COLOR_DEPTH_161616:
5319 			normalized_clk = (normalized_clk * 48) / 24;
5320 			break;
5321 		default:
5322 			/* The above depths are the only ones valid for HDMI. */
5323 			return false;
5324 		}
5325 		if (normalized_clk <= info->max_tmds_clock) {
5326 			timing_out->display_color_depth = depth;
5327 			return true;
5328 		}
5329 	} while (--depth > COLOR_DEPTH_666);
5330 	return false;
5331 }
5332 
5333 static void fill_stream_properties_from_drm_display_mode(
5334 	struct dc_stream_state *stream,
5335 	const struct drm_display_mode *mode_in,
5336 	const struct drm_connector *connector,
5337 	const struct drm_connector_state *connector_state,
5338 	const struct dc_stream_state *old_stream,
5339 	int requested_bpc)
5340 {
5341 	struct dc_crtc_timing *timing_out = &stream->timing;
5342 	const struct drm_display_info *info = &connector->display_info;
5343 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5344 	struct hdmi_vendor_infoframe hv_frame;
5345 	struct hdmi_avi_infoframe avi_frame;
5346 
5347 	memset(&hv_frame, 0, sizeof(hv_frame));
5348 	memset(&avi_frame, 0, sizeof(avi_frame));
5349 
5350 	timing_out->h_border_left = 0;
5351 	timing_out->h_border_right = 0;
5352 	timing_out->v_border_top = 0;
5353 	timing_out->v_border_bottom = 0;
5354 	/* TODO: un-hardcode */
5355 	if (drm_mode_is_420_only(info, mode_in)
5356 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5357 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5358 	else if (drm_mode_is_420_also(info, mode_in)
5359 			&& aconnector->force_yuv420_output)
5360 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5361 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5362 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5363 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5364 	else
5365 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5366 
5367 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5368 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5369 		connector,
5370 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5371 		requested_bpc);
5372 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5373 	timing_out->hdmi_vic = 0;
5374 
5375 	if (old_stream) {
5376 		timing_out->vic = old_stream->timing.vic;
5377 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5378 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5379 	} else {
5380 		timing_out->vic = drm_match_cea_mode(mode_in);
5381 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5382 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5383 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5384 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5385 	}
5386 
5387 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5388 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5389 		timing_out->vic = avi_frame.video_code;
5390 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5391 		timing_out->hdmi_vic = hv_frame.vic;
5392 	}
5393 
5394 	if (is_freesync_video_mode(mode_in, aconnector)) {
5395 		timing_out->h_addressable = mode_in->hdisplay;
5396 		timing_out->h_total = mode_in->htotal;
5397 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5398 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5399 		timing_out->v_total = mode_in->vtotal;
5400 		timing_out->v_addressable = mode_in->vdisplay;
5401 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5402 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
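		/* mode_in->clock is in kHz; multiplying by 10 yields DC's 100 Hz units */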
5403 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5404 	} else {
5405 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5406 		timing_out->h_total = mode_in->crtc_htotal;
5407 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5408 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5409 		timing_out->v_total = mode_in->crtc_vtotal;
5410 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5411 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5412 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5413 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5414 	}
5415 
5416 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5417 
5418 	stream->output_color_space = get_output_color_space(timing_out);
5419 
5420 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5421 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5422 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5423 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5424 		    drm_mode_is_420_also(info, mode_in) &&
5425 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5426 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5427 			adjust_colour_depth_from_display_info(timing_out, info);
5428 		}
5429 	}
5430 }
5431 
5432 static void fill_audio_info(struct audio_info *audio_info,
5433 			    const struct drm_connector *drm_connector,
5434 			    const struct dc_sink *dc_sink)
5435 {
5436 	int i = 0;
5437 	int cea_revision = 0;
5438 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5439 
5440 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5441 	audio_info->product_id = edid_caps->product_id;
5442 
5443 	cea_revision = drm_connector->display_info.cea_rev;
5444 
5445 	strscpy(audio_info->display_name,
5446 		edid_caps->display_name,
5447 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5448 
5449 	if (cea_revision >= 3) {
5450 		audio_info->mode_count = edid_caps->audio_mode_count;
5451 
5452 		for (i = 0; i < audio_info->mode_count; ++i) {
5453 			audio_info->modes[i].format_code =
5454 					(enum audio_format_code)
5455 					(edid_caps->audio_modes[i].format_code);
5456 			audio_info->modes[i].channel_count =
5457 					edid_caps->audio_modes[i].channel_count;
5458 			audio_info->modes[i].sample_rates.all =
5459 					edid_caps->audio_modes[i].sample_rate;
5460 			audio_info->modes[i].sample_size =
5461 					edid_caps->audio_modes[i].sample_size;
5462 		}
5463 	}
5464 
5465 	audio_info->flags.all = edid_caps->speaker_flags;
5466 
5467 	/* TODO: We only check the progressive mode; check the interlaced mode too */
5468 	if (drm_connector->latency_present[0]) {
5469 		audio_info->video_latency = drm_connector->video_latency[0];
5470 		audio_info->audio_latency = drm_connector->audio_latency[0];
5471 	}
5472 
5473 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5474 
5475 }
5476 
5477 static void
5478 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5479 				      struct drm_display_mode *dst_mode)
5480 {
5481 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5482 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5483 	dst_mode->crtc_clock = src_mode->crtc_clock;
5484 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5485 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5486 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5487 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5488 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5489 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5490 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5491 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5492 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5493 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5494 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5495 }
5496 
5497 static void
5498 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5499 					const struct drm_display_mode *native_mode,
5500 					bool scale_enabled)
5501 {
5502 	if (scale_enabled) {
5503 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5504 	} else if (native_mode->clock == drm_mode->clock &&
5505 			native_mode->htotal == drm_mode->htotal &&
5506 			native_mode->vtotal == drm_mode->vtotal) {
5507 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5508 	} else {
5509 		/* No scaling and no amdgpu-inserted mode: nothing to patch */
5510 	}
5511 }
5512 
5513 static struct dc_sink *
5514 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5515 {
5516 	struct dc_sink_init_data sink_init_data = { 0 };
5517 	struct dc_sink *sink = NULL;
5518 	sink_init_data.link = aconnector->dc_link;
5519 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5520 
5521 	sink = dc_sink_create(&sink_init_data);
5522 	if (!sink) {
5523 		DRM_ERROR("Failed to create sink!\n");
5524 		return NULL;
5525 	}
5526 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5527 
5528 	return sink;
5529 }
5530 
5531 static void set_multisync_trigger_params(
5532 		struct dc_stream_state *stream)
5533 {
5534 	struct dc_stream_state *master = NULL;
5535 
5536 	if (stream->triggered_crtc_reset.enabled) {
5537 		master = stream->triggered_crtc_reset.event_source;
5538 		stream->triggered_crtc_reset.event =
5539 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5540 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5541 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5542 	}
5543 }
5544 
5545 static void set_master_stream(struct dc_stream_state *stream_set[],
5546 			      int stream_count)
5547 {
5548 	int j, highest_rfr = 0, master_stream = 0;
5549 
5550 	for (j = 0;  j < stream_count; j++) {
5551 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5552 			int refresh_rate = 0;
5553 
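			/* e.g. a 148.5 MHz mode: 1485000 * 100 / (2200 * 1125) = 60 Hz */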
5554 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5555 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5556 			if (refresh_rate > highest_rfr) {
5557 				highest_rfr = refresh_rate;
5558 				master_stream = j;
5559 			}
5560 		}
5561 	}
5562 	for (j = 0;  j < stream_count; j++) {
5563 		if (stream_set[j])
5564 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5565 	}
5566 }
5567 
5568 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5569 {
5570 	int i = 0;
5571 	struct dc_stream_state *stream;
5572 
5573 	if (context->stream_count < 2)
5574 		return;
5575 	for (i = 0; i < context->stream_count ; i++) {
5576 		if (!context->streams[i])
5577 			continue;
5578 		/*
5579 		 * TODO: add a function to read AMD VSDB bits and set
5580 		 * crtc_sync_master.multi_sync_enabled flag
5581 		 * For now it's set to false
5582 		 */
5583 	}
5584 
5585 	set_master_stream(context->streams, context->stream_count);
5586 
5587 	for (i = 0; i < context->stream_count ; i++) {
5588 		stream = context->streams[i];
5589 
5590 		if (!stream)
5591 			continue;
5592 
5593 		set_multisync_trigger_params(stream);
5594 	}
5595 }
5596 
5597 #if defined(CONFIG_DRM_AMD_DC_DCN)
5598 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5599 							struct dc_sink *sink, struct dc_stream_state *stream,
5600 							struct dsc_dec_dpcd_caps *dsc_caps)
5601 {
5602 	stream->timing.flags.DSC = 0;
5603 
5604 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5605 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5606 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5607 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5608 				      dsc_caps);
5609 	}
5610 }
5611 
5612 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5613 										struct dc_sink *sink, struct dc_stream_state *stream,
5614 										struct dsc_dec_dpcd_caps *dsc_caps)
5615 {
5616 	struct drm_connector *drm_connector = &aconnector->base;
5617 	uint32_t link_bandwidth_kbps;
5618 
5619 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5620 							dc_link_get_link_cap(aconnector->dc_link));
5621 	/* Set DSC policy according to dsc_clock_en */
5622 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5623 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5624 
5625 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5626 
5627 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5628 						dsc_caps,
5629 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5630 						0,
5631 						link_bandwidth_kbps,
5632 						&stream->timing,
5633 						&stream->timing.dsc_cfg)) {
5634 			stream->timing.flags.DSC = 1;
5635 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5636 		}
5637 	}
5638 
5639 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5640 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5641 		stream->timing.flags.DSC = 1;
5642 
5643 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5644 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5645 
5646 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5647 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5648 
5649 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5650 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5651 }
5652 #endif
5653 
5654 static struct drm_display_mode *
5655 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5656 			  bool use_probed_modes)
5657 {
5658 	struct drm_display_mode *m, *m_pref = NULL;
5659 	u16 current_refresh, highest_refresh;
5660 	struct list_head *list_head = use_probed_modes ?
5661 						    &aconnector->base.probed_modes :
5662 						    &aconnector->base.modes;
5663 
5664 	if (aconnector->freesync_vid_base.clock != 0)
5665 		return &aconnector->freesync_vid_base;
5666 
5667 	/* Find the preferred mode */
5668 	list_for_each_entry(m, list_head, head) {
5669 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5670 			m_pref = m;
5671 			break;
5672 		}
5673 	}
5674 
5675 	if (!m_pref) {
5676 		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5677 		m_pref = list_first_entry_or_null(
5678 			&aconnector->base.modes, struct drm_display_mode, head);
5679 		if (!m_pref) {
5680 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5681 			return NULL;
5682 		}
5683 	}
5684 
5685 	highest_refresh = drm_mode_vrefresh(m_pref);
5686 
5687 	/*
5688 	 * Find the mode with highest refresh rate with same resolution.
5689 	 * For some monitors, preferred mode is not the mode with highest
5690 	 * supported refresh rate.
5691 	 */
5692 	list_for_each_entry(m, list_head, head) {
5693 		current_refresh  = drm_mode_vrefresh(m);
5694 
5695 		if (m->hdisplay == m_pref->hdisplay &&
5696 		    m->vdisplay == m_pref->vdisplay &&
5697 		    highest_refresh < current_refresh) {
5698 			highest_refresh = current_refresh;
5699 			m_pref = m;
5700 		}
5701 	}
5702 
5703 	aconnector->freesync_vid_base = *m_pref;
5704 	return m_pref;
5705 }
5706 
5707 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5708 				   struct amdgpu_dm_connector *aconnector)
5709 {
5710 	struct drm_display_mode *high_mode;
5711 	int timing_diff;
5712 
5713 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5714 	if (!high_mode || !mode)
5715 		return false;
5716 
5717 	timing_diff = high_mode->vtotal - mode->vtotal;
5718 
5719 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5720 	    high_mode->hdisplay != mode->hdisplay ||
5721 	    high_mode->vdisplay != mode->vdisplay ||
5722 	    high_mode->hsync_start != mode->hsync_start ||
5723 	    high_mode->hsync_end != mode->hsync_end ||
5724 	    high_mode->htotal != mode->htotal ||
5725 	    high_mode->hskew != mode->hskew ||
5726 	    high_mode->vscan != mode->vscan ||
5727 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5728 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5729 		return false;
5730 	else
5731 		return true;
5732 }
5733 
5734 static struct dc_stream_state *
5735 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5736 		       const struct drm_display_mode *drm_mode,
5737 		       const struct dm_connector_state *dm_state,
5738 		       const struct dc_stream_state *old_stream,
5739 		       int requested_bpc)
5740 {
5741 	struct drm_display_mode *preferred_mode = NULL;
5742 	struct drm_connector *drm_connector;
5743 	const struct drm_connector_state *con_state =
5744 		dm_state ? &dm_state->base : NULL;
5745 	struct dc_stream_state *stream = NULL;
5746 	struct drm_display_mode mode = *drm_mode;
5747 	struct drm_display_mode saved_mode;
5748 	struct drm_display_mode *freesync_mode = NULL;
5749 	bool native_mode_found = false;
5750 	bool recalculate_timing = false;
5751 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5752 	int mode_refresh;
5753 	int preferred_refresh = 0;
5754 #if defined(CONFIG_DRM_AMD_DC_DCN)
5755 	struct dsc_dec_dpcd_caps dsc_caps;
5756 #endif
5757 	struct dc_sink *sink = NULL;
5758 
5759 	memset(&saved_mode, 0, sizeof(saved_mode));
5760 
5761 	if (aconnector == NULL) {
5762 		DRM_ERROR("aconnector is NULL!\n");
5763 		return stream;
5764 	}
5765 
5766 	drm_connector = &aconnector->base;
5767 
5768 	if (!aconnector->dc_sink) {
5769 		sink = create_fake_sink(aconnector);
5770 		if (!sink)
5771 			return stream;
5772 	} else {
5773 		sink = aconnector->dc_sink;
5774 		dc_sink_retain(sink);
5775 	}
5776 
5777 	stream = dc_create_stream_for_sink(sink);
5778 
5779 	if (stream == NULL) {
5780 		DRM_ERROR("Failed to create stream for sink!\n");
5781 		goto finish;
5782 	}
5783 
5784 	stream->dm_stream_context = aconnector;
5785 
5786 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5787 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5788 
5789 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5790 		/* Search for preferred mode */
5791 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5792 			native_mode_found = true;
5793 			break;
5794 		}
5795 	}
5796 	if (!native_mode_found)
5797 		preferred_mode = list_first_entry_or_null(
5798 				&aconnector->base.modes,
5799 				struct drm_display_mode,
5800 				head);
5801 
5802 	mode_refresh = drm_mode_vrefresh(&mode);
5803 
5804 	if (preferred_mode == NULL) {
5805 		/*
5806 		 * This may not be an error: the use case is when there are no
5807 		 * usermode calls to reset and set the mode upon hotplug. In that
5808 		 * case, we call set mode ourselves to restore the previous mode,
5809 		 * and the mode list may not be filled in in time.
5810 		 */
5811 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5812 	} else {
5813 		recalculate_timing = amdgpu_freesync_vid_mode &&
5814 				 is_freesync_video_mode(&mode, aconnector);
5815 		if (recalculate_timing) {
5816 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5817 			saved_mode = mode;
5818 			mode = *freesync_mode;
5819 		} else {
5820 			decide_crtc_timing_for_drm_display_mode(
5821 				&mode, preferred_mode, scale);
5822 
5823 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
5824 		}
5825 	}
5826 
5827 	if (recalculate_timing)
5828 		drm_mode_set_crtcinfo(&saved_mode, 0);
5829 	else if (!dm_state)
5830 		drm_mode_set_crtcinfo(&mode, 0);
5831 
5832 	/*
5833 	 * If scaling is enabled and the refresh rate didn't change,
5834 	 * we copy the VIC and polarities of the old timings.
5835 	 */
5836 	if (!scale || mode_refresh != preferred_refresh)
5837 		fill_stream_properties_from_drm_display_mode(
5838 			stream, &mode, &aconnector->base, con_state, NULL,
5839 			requested_bpc);
5840 	else
5841 		fill_stream_properties_from_drm_display_mode(
5842 			stream, &mode, &aconnector->base, con_state, old_stream,
5843 			requested_bpc);
5844 
5845 #if defined(CONFIG_DRM_AMD_DC_DCN)
5846 	/* SST DSC determination policy */
5847 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
5848 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
5849 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
5850 #endif
5851 
5852 	update_stream_scaling_settings(&mode, dm_state, stream);
5853 
5854 	fill_audio_info(
5855 		&stream->audio_info,
5856 		drm_connector,
5857 		sink);
5858 
5859 	update_stream_signal(stream, sink);
5860 
5861 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5862 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5863 
5864 	if (stream->link->psr_settings.psr_feature_enabled) {
5865 		/*
5866 		 * Decide whether the stream supports VSC SDP colorimetry
5867 		 * before building the VSC info packet.
5868 		 */
5869 		stream->use_vsc_sdp_for_colorimetry = false;
5870 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5871 			stream->use_vsc_sdp_for_colorimetry =
5872 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5873 		} else {
5874 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5875 				stream->use_vsc_sdp_for_colorimetry = true;
5876 		}
5877 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5878 	}
5879 finish:
5880 	dc_sink_release(sink);
5881 
5882 	return stream;
5883 }
5884 
5885 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5886 {
5887 	drm_crtc_cleanup(crtc);
5888 	kfree(crtc);
5889 }
5890 
5891 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5892 				  struct drm_crtc_state *state)
5893 {
5894 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5895 
5896 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5897 	if (cur->stream)
5898 		dc_stream_release(cur->stream);
5899 
5901 	__drm_atomic_helper_crtc_destroy_state(state);
5902 
5904 	kfree(state);
5905 }
5906 
5907 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5908 {
5909 	struct dm_crtc_state *state;
5910 
5911 	if (crtc->state)
5912 		dm_crtc_destroy_state(crtc, crtc->state);
5913 
5914 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5915 	if (WARN_ON(!state))
5916 		return;
5917 
5918 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5919 }
5920 
5921 static struct drm_crtc_state *
5922 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5923 {
5924 	struct dm_crtc_state *state, *cur;
5925 
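	/*
	 * to_dm_crtc_state() is container_of() pointer arithmetic and does not
	 * dereference crtc->state, so evaluating it before the NULL check
	 * below is safe.
	 */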
5926 	cur = to_dm_crtc_state(crtc->state);
5927 
5928 	if (WARN_ON(!crtc->state))
5929 		return NULL;
5930 
5931 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5932 	if (!state)
5933 		return NULL;
5934 
5935 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5936 
5937 	if (cur->stream) {
5938 		state->stream = cur->stream;
5939 		dc_stream_retain(state->stream);
5940 	}
5941 
5942 	state->active_planes = cur->active_planes;
5943 	state->vrr_infopacket = cur->vrr_infopacket;
5944 	state->abm_level = cur->abm_level;
5945 	state->vrr_supported = cur->vrr_supported;
5946 	state->freesync_config = cur->freesync_config;
5947 	state->cm_has_degamma = cur->cm_has_degamma;
5948 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5949 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5950 
5951 	return &state->base;
5952 }
5953 
5954 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5955 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5956 {
5957 	crtc_debugfs_init(crtc);
5958 
5959 	return 0;
5960 }
5961 #endif
5962 
5963 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5964 {
5965 	enum dc_irq_source irq_source;
5966 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5967 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5968 	int rc;
5969 
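	/*
	 * DC numbers per-OTG interrupt sources consecutively, so offsetting
	 * the VUPDATE base by the OTG instance selects this CRTC's source.
	 */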
5970 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5971 
5972 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5973 
5974 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5975 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5976 	return rc;
5977 }
5978 
5979 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5980 {
5981 	enum dc_irq_source irq_source;
5982 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5983 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5984 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5985 #if defined(CONFIG_DRM_AMD_DC_DCN)
5986 	struct amdgpu_display_manager *dm = &adev->dm;
5987 	unsigned long flags;
5988 #endif
5989 	int rc = 0;
5990 
5991 	if (enable) {
5992 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5993 		if (amdgpu_dm_vrr_active(acrtc_state))
5994 			rc = dm_set_vupdate_irq(crtc, true);
5995 	} else {
5996 		/* vblank irq off -> vupdate irq off */
5997 		rc = dm_set_vupdate_irq(crtc, false);
5998 	}
5999 
6000 	if (rc)
6001 		return rc;
6002 
6003 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6004 
6005 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6006 		return -EBUSY;
6007 
6008 	if (amdgpu_in_reset(adev))
6009 		return 0;
6010 
6011 #if defined(CONFIG_DRM_AMD_DC_DCN)
6012 	spin_lock_irqsave(&dm->vblank_lock, flags);
6013 	dm->vblank_workqueue->dm = dm;
6014 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
6015 	dm->vblank_workqueue->enable = enable;
6016 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
6017 	schedule_work(&dm->vblank_workqueue->mall_work);
6018 #endif
6019 
6020 	return 0;
6021 }
6022 
6023 static int dm_enable_vblank(struct drm_crtc *crtc)
6024 {
6025 	return dm_set_vblank(crtc, true);
6026 }
6027 
6028 static void dm_disable_vblank(struct drm_crtc *crtc)
6029 {
6030 	dm_set_vblank(crtc, false);
6031 }
6032 
6033 /* Only the options currently available to the driver are implemented */
6034 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6035 	.reset = dm_crtc_reset_state,
6036 	.destroy = amdgpu_dm_crtc_destroy,
6037 	.set_config = drm_atomic_helper_set_config,
6038 	.page_flip = drm_atomic_helper_page_flip,
6039 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6040 	.atomic_destroy_state = dm_crtc_destroy_state,
6041 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6042 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6043 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6044 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6045 	.enable_vblank = dm_enable_vblank,
6046 	.disable_vblank = dm_disable_vblank,
6047 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6048 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6049 	.late_register = amdgpu_dm_crtc_late_register,
6050 #endif
6051 };
6052 
6053 static enum drm_connector_status
6054 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6055 {
6056 	bool connected;
6057 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6058 
6059 	/*
6060 	 * Notes:
6061 	 * 1. This interface is NOT called in the context of an HPD irq.
6062 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6063 	 * which makes it a bad place for *any* MST-related activity.
6064 	 */
6065 
6066 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6067 	    !aconnector->fake_enable)
6068 		connected = (aconnector->dc_sink != NULL);
6069 	else
6070 		connected = (aconnector->base.force == DRM_FORCE_ON);
6071 
6072 	update_subconnector_property(aconnector);
6073 
6074 	return (connected ? connector_status_connected :
6075 			connector_status_disconnected);
6076 }
6077 
6078 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6079 					    struct drm_connector_state *connector_state,
6080 					    struct drm_property *property,
6081 					    uint64_t val)
6082 {
6083 	struct drm_device *dev = connector->dev;
6084 	struct amdgpu_device *adev = drm_to_adev(dev);
6085 	struct dm_connector_state *dm_old_state =
6086 		to_dm_connector_state(connector->state);
6087 	struct dm_connector_state *dm_new_state =
6088 		to_dm_connector_state(connector_state);
6089 
6090 	int ret = -EINVAL;
6091 
6092 	if (property == dev->mode_config.scaling_mode_property) {
6093 		enum amdgpu_rmx_type rmx_type;
6094 
6095 		switch (val) {
6096 		case DRM_MODE_SCALE_CENTER:
6097 			rmx_type = RMX_CENTER;
6098 			break;
6099 		case DRM_MODE_SCALE_ASPECT:
6100 			rmx_type = RMX_ASPECT;
6101 			break;
6102 		case DRM_MODE_SCALE_FULLSCREEN:
6103 			rmx_type = RMX_FULL;
6104 			break;
6105 		case DRM_MODE_SCALE_NONE:
6106 		default:
6107 			rmx_type = RMX_OFF;
6108 			break;
6109 		}
6110 
6111 		if (dm_old_state->scaling == rmx_type)
6112 			return 0;
6113 
6114 		dm_new_state->scaling = rmx_type;
6115 		ret = 0;
6116 	} else if (property == adev->mode_info.underscan_hborder_property) {
6117 		dm_new_state->underscan_hborder = val;
6118 		ret = 0;
6119 	} else if (property == adev->mode_info.underscan_vborder_property) {
6120 		dm_new_state->underscan_vborder = val;
6121 		ret = 0;
6122 	} else if (property == adev->mode_info.underscan_property) {
6123 		dm_new_state->underscan_enable = val;
6124 		ret = 0;
6125 	} else if (property == adev->mode_info.abm_level_property) {
6126 		dm_new_state->abm_level = val;
6127 		ret = 0;
6128 	}
6129 
6130 	return ret;
6131 }
6132 
6133 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6134 					    const struct drm_connector_state *state,
6135 					    struct drm_property *property,
6136 					    uint64_t *val)
6137 {
6138 	struct drm_device *dev = connector->dev;
6139 	struct amdgpu_device *adev = drm_to_adev(dev);
6140 	struct dm_connector_state *dm_state =
6141 		to_dm_connector_state(state);
6142 	int ret = -EINVAL;
6143 
6144 	if (property == dev->mode_config.scaling_mode_property) {
6145 		switch (dm_state->scaling) {
6146 		case RMX_CENTER:
6147 			*val = DRM_MODE_SCALE_CENTER;
6148 			break;
6149 		case RMX_ASPECT:
6150 			*val = DRM_MODE_SCALE_ASPECT;
6151 			break;
6152 		case RMX_FULL:
6153 			*val = DRM_MODE_SCALE_FULLSCREEN;
6154 			break;
6155 		case RMX_OFF:
6156 		default:
6157 			*val = DRM_MODE_SCALE_NONE;
6158 			break;
6159 		}
6160 		ret = 0;
6161 	} else if (property == adev->mode_info.underscan_hborder_property) {
6162 		*val = dm_state->underscan_hborder;
6163 		ret = 0;
6164 	} else if (property == adev->mode_info.underscan_vborder_property) {
6165 		*val = dm_state->underscan_vborder;
6166 		ret = 0;
6167 	} else if (property == adev->mode_info.underscan_property) {
6168 		*val = dm_state->underscan_enable;
6169 		ret = 0;
6170 	} else if (property == adev->mode_info.abm_level_property) {
6171 		*val = dm_state->abm_level;
6172 		ret = 0;
6173 	}
6174 
6175 	return ret;
6176 }
6177 
6178 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6179 {
6180 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6181 
6182 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6183 }
6184 
6185 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6186 {
6187 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6188 	const struct dc_link *link = aconnector->dc_link;
6189 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6190 	struct amdgpu_display_manager *dm = &adev->dm;
6191 
6192 	/*
6193 	 * Call this only if mst_mgr was initialized before, since that's not
6194 	 * done for all connector types.
6195 	 */
6196 	if (aconnector->mst_mgr.dev)
6197 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6198 
6199 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6200 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6201 
6202 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6203 	    link->type != dc_connection_none &&
6204 	    dm->backlight_dev) {
6205 		backlight_device_unregister(dm->backlight_dev);
6206 		dm->backlight_dev = NULL;
6207 	}
6208 #endif
6209 
6210 	if (aconnector->dc_em_sink)
6211 		dc_sink_release(aconnector->dc_em_sink);
6212 	aconnector->dc_em_sink = NULL;
6213 	if (aconnector->dc_sink)
6214 		dc_sink_release(aconnector->dc_sink);
6215 	aconnector->dc_sink = NULL;
6216 
6217 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6218 	drm_connector_unregister(connector);
6219 	drm_connector_cleanup(connector);
6220 	if (aconnector->i2c) {
6221 		i2c_del_adapter(&aconnector->i2c->base);
6222 		kfree(aconnector->i2c);
6223 	}
6224 	kfree(aconnector->dm_dp_aux.aux.name);
6225 
6226 	kfree(connector);
6227 }
6228 
6229 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6230 {
6231 	struct dm_connector_state *state =
6232 		to_dm_connector_state(connector->state);
6233 
6234 	if (connector->state)
6235 		__drm_atomic_helper_connector_destroy_state(connector->state);
6236 
6237 	kfree(state);
6238 
6239 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6240 
6241 	if (state) {
6242 		state->scaling = RMX_OFF;
6243 		state->underscan_enable = false;
6244 		state->underscan_hborder = 0;
6245 		state->underscan_vborder = 0;
6246 		state->base.max_requested_bpc = 8;
6247 		state->vcpi_slots = 0;
6248 		state->pbn = 0;
6249 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6250 			state->abm_level = amdgpu_dm_abm_level;
6251 
6252 		__drm_atomic_helper_connector_reset(connector, &state->base);
6253 	}
6254 }
6255 
6256 struct drm_connector_state *
6257 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6258 {
6259 	struct dm_connector_state *state =
6260 		to_dm_connector_state(connector->state);
6261 
6262 	struct dm_connector_state *new_state =
6263 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6264 
6265 	if (!new_state)
6266 		return NULL;
6267 
6268 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6269 
6270 	new_state->freesync_capable = state->freesync_capable;
6271 	new_state->abm_level = state->abm_level;
6272 	new_state->scaling = state->scaling;
6273 	new_state->underscan_enable = state->underscan_enable;
6274 	new_state->underscan_hborder = state->underscan_hborder;
6275 	new_state->underscan_vborder = state->underscan_vborder;
6276 	new_state->vcpi_slots = state->vcpi_slots;
6277 	new_state->pbn = state->pbn;
6278 	return &new_state->base;
6279 }
6280 
6281 static int
6282 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6283 {
6284 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6285 		to_amdgpu_dm_connector(connector);
6286 	int r;
6287 
6288 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6289 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6290 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6291 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6292 		if (r)
6293 			return r;
6294 	}
6295 
6296 #if defined(CONFIG_DEBUG_FS)
6297 	connector_debugfs_init(amdgpu_dm_connector);
6298 #endif
6299 
6300 	return 0;
6301 }
6302 
6303 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6304 	.reset = amdgpu_dm_connector_funcs_reset,
6305 	.detect = amdgpu_dm_connector_detect,
6306 	.fill_modes = drm_helper_probe_single_connector_modes,
6307 	.destroy = amdgpu_dm_connector_destroy,
6308 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6309 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6310 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6311 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6312 	.late_register = amdgpu_dm_connector_late_register,
6313 	.early_unregister = amdgpu_dm_connector_unregister
6314 };
6315 
6316 static int get_modes(struct drm_connector *connector)
6317 {
6318 	return amdgpu_dm_connector_get_modes(connector);
6319 }
6320 
6321 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6322 {
6323 	struct dc_sink_init_data init_params = {
6324 			.link = aconnector->dc_link,
6325 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6326 	};
6327 	struct edid *edid;
6328 
6329 	if (!aconnector->base.edid_blob_ptr) {
6330 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6331 				aconnector->base.name);
6332 
6333 		aconnector->base.force = DRM_FORCE_OFF;
6334 		aconnector->base.override_edid = false;
6335 		return;
6336 	}
6337 
6338 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6339 
6340 	aconnector->edid = edid;
6341 
6342 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6343 		aconnector->dc_link,
6344 		(uint8_t *)edid,
6345 		(edid->extensions + 1) * EDID_LENGTH,
6346 		&init_params);
6347 
6348 	if (aconnector->base.force == DRM_FORCE_ON) {
6349 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6350 		aconnector->dc_link->local_sink :
6351 		aconnector->dc_em_sink;
6352 		dc_sink_retain(aconnector->dc_sink);
6353 	}
6354 }
6355 
6356 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6357 {
6358 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6359 
6360 	/*
6361 	 * In case of a headless boot with force-on for a DP managed connector,
6362 	 * those settings have to be != 0 to get an initial modeset.
6363 	 */
6364 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6365 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6366 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6367 	}
6368 
6370 	aconnector->base.override_edid = true;
6371 	create_eml_sink(aconnector);
6372 }
6373 
6374 static struct dc_stream_state *
6375 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6376 				const struct drm_display_mode *drm_mode,
6377 				const struct dm_connector_state *dm_state,
6378 				const struct dc_stream_state *old_stream)
6379 {
6380 	struct drm_connector *connector = &aconnector->base;
6381 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6382 	struct dc_stream_state *stream;
6383 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6384 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6385 	enum dc_status dc_result = DC_OK;
6386 
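	/* e.g. a max_requested_bpc of 10 retries validation at 10, 8, then 6 */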
6387 	do {
6388 		stream = create_stream_for_sink(aconnector, drm_mode,
6389 						dm_state, old_stream,
6390 						requested_bpc);
6391 		if (stream == NULL) {
6392 			DRM_ERROR("Failed to create stream for sink!\n");
6393 			break;
6394 		}
6395 
6396 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6397 
6398 		if (dc_result != DC_OK) {
6399 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6400 				      drm_mode->hdisplay,
6401 				      drm_mode->vdisplay,
6402 				      drm_mode->clock,
6403 				      dc_result,
6404 				      dc_status_to_str(dc_result));
6405 
6406 			dc_stream_release(stream);
6407 			stream = NULL;
6408 			requested_bpc -= 2; /* lower bpc to retry validation */
6409 		}
6410 
6411 	} while (stream == NULL && requested_bpc >= 6);
6412 
6413 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6414 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6415 
6416 		aconnector->force_yuv420_output = true;
6417 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6418 						dm_state, old_stream);
6419 		aconnector->force_yuv420_output = false;
6420 	}
6421 
6422 	return stream;
6423 }
6424 
6425 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6426 				   struct drm_display_mode *mode)
6427 {
6428 	int result = MODE_ERROR;
6429 	struct dc_sink *dc_sink;
6430 	/* TODO: Unhardcode stream count */
6431 	struct dc_stream_state *stream;
6432 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6433 
6434 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6435 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6436 		return result;
6437 
6438 	/*
6439 	 * Only run this the first time mode_valid is called, to initialize
6440 	 * EDID mgmt.
6441 	 */
6442 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6443 		!aconnector->dc_em_sink)
6444 		handle_edid_mgmt(aconnector);
6445 
6446 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6447 
6448 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6449 				aconnector->base.force != DRM_FORCE_ON) {
6450 		DRM_ERROR("dc_sink is NULL!\n");
6451 		goto fail;
6452 	}
6453 
6454 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6455 	if (stream) {
6456 		dc_stream_release(stream);
6457 		result = MODE_OK;
6458 	}
6459 
6460 fail:
6461 	/* TODO: error handling */
6462 	return result;
6463 }
6464 
6465 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6466 				struct dc_info_packet *out)
6467 {
6468 	struct hdmi_drm_infoframe frame;
6469 	unsigned char buf[30]; /* 26 + 4 */
6470 	ssize_t len;
6471 	int ret, i;
6472 
6473 	memset(out, 0, sizeof(*out));
6474 
6475 	if (!state->hdr_output_metadata)
6476 		return 0;
6477 
6478 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6479 	if (ret)
6480 		return ret;
6481 
6482 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6483 	if (len < 0)
6484 		return (int)len;
6485 
6486 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6487 	if (len != 30)
6488 		return -EINVAL;
6489 
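	/*
	 * Packed infoframe layout: buf[0..2] hold the three header bytes,
	 * buf[3] the checksum, and buf[4..29] the 26-byte static metadata
	 * payload that is copied into out->sb below.
	 */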
6490 	/* Prepare the infopacket for DC. */
6491 	switch (state->connector->connector_type) {
6492 	case DRM_MODE_CONNECTOR_HDMIA:
6493 		out->hb0 = 0x87; /* type */
6494 		out->hb1 = 0x01; /* version */
6495 		out->hb2 = 0x1A; /* length */
6496 		out->sb[0] = buf[3]; /* checksum */
6497 		i = 1;
6498 		break;
6499 
6500 	case DRM_MODE_CONNECTOR_DisplayPort:
6501 	case DRM_MODE_CONNECTOR_eDP:
6502 		out->hb0 = 0x00; /* sdp id, zero */
6503 		out->hb1 = 0x87; /* type */
6504 		out->hb2 = 0x1D; /* payload len - 1 */
6505 		out->hb3 = (0x13 << 2); /* sdp version */
6506 		out->sb[0] = 0x01; /* version */
6507 		out->sb[1] = 0x1A; /* length */
6508 		i = 2;
6509 		break;
6510 
6511 	default:
6512 		return -EINVAL;
6513 	}
6514 
6515 	memcpy(&out->sb[i], &buf[4], 26);
6516 	out->valid = true;
6517 
6518 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6519 		       sizeof(out->sb), false);
6520 
6521 	return 0;
6522 }
6523 
6524 static int
6525 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6526 				 struct drm_atomic_state *state)
6527 {
6528 	struct drm_connector_state *new_con_state =
6529 		drm_atomic_get_new_connector_state(state, conn);
6530 	struct drm_connector_state *old_con_state =
6531 		drm_atomic_get_old_connector_state(state, conn);
6532 	struct drm_crtc *crtc = new_con_state->crtc;
6533 	struct drm_crtc_state *new_crtc_state;
6534 	int ret;
6535 
6536 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6537 
6538 	if (!crtc)
6539 		return 0;
6540 
6541 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6542 		struct dc_info_packet hdr_infopacket;
6543 
6544 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6545 		if (ret)
6546 			return ret;
6547 
6548 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6549 		if (IS_ERR(new_crtc_state))
6550 			return PTR_ERR(new_crtc_state);
6551 
6552 		/*
6553 		 * DC considers the stream backends changed if the
6554 		 * static metadata changes. Forcing the modeset also
6555 		 * gives a simple way for userspace to switch from
6556 		 * 8bpc to 10bpc when setting the metadata to enter
6557 		 * or exit HDR.
6558 		 *
6559 		 * Changing the static metadata after it's been
6560 		 * set is permissible, however. So only force a
6561 		 * modeset if we're entering or exiting HDR.
6562 		 */
6563 		new_crtc_state->mode_changed =
6564 			!old_con_state->hdr_output_metadata ||
6565 			!new_con_state->hdr_output_metadata;
6566 	}
6567 
6568 	return 0;
6569 }
6570 
6571 static const struct drm_connector_helper_funcs
6572 amdgpu_dm_connector_helper_funcs = {
6573 	/*
6574 	 * If a second, bigger display is hotplugged in fbcon mode, its higher
6575 	 * resolution modes will be filtered out by drm_mode_validate_size() and
6576 	 * end up missing after the user starts lightdm. So we need to renew the
6577 	 * mode list in the get_modes callback, not just return the mode count.
6578 	 */
6579 	.get_modes = get_modes,
6580 	.mode_valid = amdgpu_dm_connector_mode_valid,
6581 	.atomic_check = amdgpu_dm_connector_atomic_check,
6582 };
6583 
6584 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6585 {
6586 }
6587 
6588 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6589 {
6590 	struct drm_atomic_state *state = new_crtc_state->state;
6591 	struct drm_plane *plane;
6592 	int num_active = 0;
6593 
6594 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6595 		struct drm_plane_state *new_plane_state;
6596 
6597 		/* Cursor planes are "fake". */
6598 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6599 			continue;
6600 
6601 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6602 
6603 		if (!new_plane_state) {
6604 			/*
6605 			 * The plane is enabled on the CRTC and hasn't changed
6606 			 * state. This means that it previously passed
6607 			 * validation and is therefore enabled.
6608 			 */
6609 			num_active += 1;
6610 			continue;
6611 		}
6612 
6613 		/* We need a framebuffer to be considered enabled. */
6614 		num_active += (new_plane_state->fb != NULL);
6615 	}
6616 
6617 	return num_active;
6618 }
6619 
6620 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6621 					 struct drm_crtc_state *new_crtc_state)
6622 {
6623 	struct dm_crtc_state *dm_new_crtc_state =
6624 		to_dm_crtc_state(new_crtc_state);
6625 
6626 	dm_new_crtc_state->active_planes = 0;
6627 
6628 	if (!dm_new_crtc_state->stream)
6629 		return;
6630 
6631 	dm_new_crtc_state->active_planes =
6632 		count_crtc_active_planes(new_crtc_state);
6633 }
6634 
6635 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6636 				       struct drm_atomic_state *state)
6637 {
6638 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6639 									  crtc);
6640 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6641 	struct dc *dc = adev->dm.dc;
6642 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6643 	int ret = -EINVAL;
6644 
6645 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6646 
6647 	dm_update_crtc_active_planes(crtc, crtc_state);
6648 
6649 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6650 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6651 		return ret;
6652 	}
6653 
6654 	/*
6655 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6656 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6657 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6658 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6659 	 */
6660 	if (crtc_state->enable &&
6661 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6662 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6663 		return -EINVAL;
6664 	}
6665 
6666 	/* In some use cases, like reset, no stream is attached */
6667 	if (!dm_crtc_state->stream)
6668 		return 0;
6669 
6670 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6671 		return 0;
6672 
6673 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6674 	return ret;
6675 }
6676 
6677 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6678 				      const struct drm_display_mode *mode,
6679 				      struct drm_display_mode *adjusted_mode)
6680 {
6681 	return true;
6682 }
6683 
6684 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6685 	.disable = dm_crtc_helper_disable,
6686 	.atomic_check = dm_crtc_helper_atomic_check,
6687 	.mode_fixup = dm_crtc_helper_mode_fixup,
6688 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6689 };
6690 
6691 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6692 {
6694 }
6695 
6696 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6697 {
6698 	switch (display_color_depth) {
6699 	case COLOR_DEPTH_666:
6700 		return 6;
6701 	case COLOR_DEPTH_888:
6702 		return 8;
6703 	case COLOR_DEPTH_101010:
6704 		return 10;
6705 	case COLOR_DEPTH_121212:
6706 		return 12;
6707 	case COLOR_DEPTH_141414:
6708 		return 14;
6709 	case COLOR_DEPTH_161616:
6710 		return 16;
6711 	default:
6712 		break;
6713 	}
6714 	return 0;
6715 }
6716 
6717 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6718 					  struct drm_crtc_state *crtc_state,
6719 					  struct drm_connector_state *conn_state)
6720 {
6721 	struct drm_atomic_state *state = crtc_state->state;
6722 	struct drm_connector *connector = conn_state->connector;
6723 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6724 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6725 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6726 	struct drm_dp_mst_topology_mgr *mst_mgr;
6727 	struct drm_dp_mst_port *mst_port;
6728 	enum dc_color_depth color_depth;
6729 	int clock, bpp = 0;
6730 	bool is_y420 = false;
6731 
6732 	if (!aconnector->port || !aconnector->dc_sink)
6733 		return 0;
6734 
6735 	mst_port = aconnector->port;
6736 	mst_mgr = &aconnector->mst_port->mst_mgr;
6737 
6738 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6739 		return 0;
6740 
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
6745 		color_depth = convert_color_depth_from_display_info(connector,
6746 								    is_y420,
6747 								    max_bpc);
6748 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6749 		clock = adjusted_mode->clock;
6750 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6751 	}
6752 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6753 									   mst_mgr,
6754 									   mst_port,
6755 									   dm_new_connector_state->pbn,
6756 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6757 	if (dm_new_connector_state->vcpi_slots < 0) {
6758 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6759 		return dm_new_connector_state->vcpi_slots;
6760 	}
6761 	return 0;
6762 }
6763 
6764 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6765 	.disable = dm_encoder_helper_disable,
6766 	.atomic_check = dm_encoder_helper_atomic_check
6767 };
6768 
6769 #if defined(CONFIG_DRM_AMD_DC_DCN)
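/*
 * When DSC is enabled on an MST stream, the VCPI allocation made during
 * encoder atomic_check must be redone from the compressed rate. Note that
 * dsc_cfg.bits_per_pixel is in units of 1/16 bpp, which is what
 * drm_dp_calc_pbn_mode() expects when its dsc argument is true.
 */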
6770 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6771 					    struct dc_state *dc_state)
6772 {
6773 	struct dc_stream_state *stream = NULL;
6774 	struct drm_connector *connector;
6775 	struct drm_connector_state *new_con_state;
6776 	struct amdgpu_dm_connector *aconnector;
6777 	struct dm_connector_state *dm_conn_state;
6778 	int i, j, clock, bpp;
6779 	int vcpi, pbn_div, pbn = 0;
6780 
6781 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6782 
6783 		aconnector = to_amdgpu_dm_connector(connector);
6784 
6785 		if (!aconnector->port)
6786 			continue;
6787 
6788 		if (!new_con_state || !new_con_state->crtc)
6789 			continue;
6790 
6791 		dm_conn_state = to_dm_connector_state(new_con_state);
6792 
6793 		for (j = 0; j < dc_state->stream_count; j++) {
6794 			stream = dc_state->streams[j];
6795 			if (!stream)
6796 				continue;
6797 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6799 				break;
6800 
6801 			stream = NULL;
6802 		}
6803 
6804 		if (!stream)
6805 			continue;
6806 
6807 		if (stream->timing.flags.DSC != 1) {
6808 			drm_dp_mst_atomic_enable_dsc(state,
6809 						     aconnector->port,
6810 						     dm_conn_state->pbn,
6811 						     0,
6812 						     false);
6813 			continue;
6814 		}
6815 
6816 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6817 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6818 		clock = stream->timing.pix_clk_100hz / 10;
6819 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6820 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6821 						    aconnector->port,
6822 						    pbn, pbn_div,
6823 						    true);
6824 		if (vcpi < 0)
6825 			return vcpi;
6826 
6827 		dm_conn_state->pbn = pbn;
6828 		dm_conn_state->vcpi_slots = vcpi;
6829 	}
6830 	return 0;
6831 }
6832 #endif
6833 
6834 static void dm_drm_plane_reset(struct drm_plane *plane)
6835 {
6836 	struct dm_plane_state *amdgpu_state = NULL;
6837 
6838 	if (plane->state)
6839 		plane->funcs->atomic_destroy_state(plane, plane->state);
6840 
6841 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6842 	WARN_ON(amdgpu_state == NULL);
6843 
6844 	if (amdgpu_state)
6845 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6846 }
6847 
6848 static struct drm_plane_state *
6849 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6850 {
6851 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6852 
6853 	old_dm_plane_state = to_dm_plane_state(plane->state);
6854 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6855 	if (!dm_plane_state)
6856 		return NULL;
6857 
6858 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6859 
6860 	if (old_dm_plane_state->dc_state) {
6861 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6862 		dc_plane_state_retain(dm_plane_state->dc_state);
6863 	}
6864 
6865 	return &dm_plane_state->base;
6866 }
6867 
6868 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6869 				struct drm_plane_state *state)
6870 {
6871 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6872 
6873 	if (dm_plane_state->dc_state)
6874 		dc_plane_state_release(dm_plane_state->dc_state);
6875 
6876 	drm_atomic_helper_plane_destroy_state(plane, state);
6877 }
6878 
6879 static const struct drm_plane_funcs dm_plane_funcs = {
6880 	.update_plane	= drm_atomic_helper_update_plane,
6881 	.disable_plane	= drm_atomic_helper_disable_plane,
6882 	.destroy	= drm_primary_helper_destroy,
6883 	.reset = dm_drm_plane_reset,
6884 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6885 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6886 	.format_mod_supported = dm_plane_format_mod_supported,
6887 };
6888 
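/*
 * prepare_fb: pin the backing BO so the scanout buffer cannot move while
 * DC is using it. Cursors must be placed in VRAM; other planes may use
 * any domain reported by amdgpu_display_supported_domains() (typically
 * VRAM, plus GTT on some APUs). The GPU address recorded in the
 * amdgpu_framebuffer here is what plane programming consumes later.
 */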
6889 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6890 				      struct drm_plane_state *new_state)
6891 {
6892 	struct amdgpu_framebuffer *afb;
6893 	struct drm_gem_object *obj;
6894 	struct amdgpu_device *adev;
6895 	struct amdgpu_bo *rbo;
6896 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6897 	struct list_head list;
6898 	struct ttm_validate_buffer tv;
6899 	struct ww_acquire_ctx ticket;
6900 	uint32_t domain;
6901 	int r;
6902 
6903 	if (!new_state->fb) {
6904 		DRM_DEBUG_KMS("No FB bound\n");
6905 		return 0;
6906 	}
6907 
6908 	afb = to_amdgpu_framebuffer(new_state->fb);
6909 	obj = new_state->fb->obj[0];
6910 	rbo = gem_to_amdgpu_bo(obj);
6911 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6912 	INIT_LIST_HEAD(&list);
6913 
6914 	tv.bo = &rbo->tbo;
6915 	tv.num_shared = 1;
6916 	list_add(&tv.head, &list);
6917 
6918 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6919 	if (r) {
6920 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6921 		return r;
6922 	}
6923 
6924 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6925 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6926 	else
6927 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6928 
6929 	r = amdgpu_bo_pin(rbo, domain);
6930 	if (unlikely(r != 0)) {
6931 		if (r != -ERESTARTSYS)
6932 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6933 		ttm_eu_backoff_reservation(&ticket, &list);
6934 		return r;
6935 	}
6936 
6937 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6938 	if (unlikely(r != 0)) {
6939 		amdgpu_bo_unpin(rbo);
6940 		ttm_eu_backoff_reservation(&ticket, &list);
6941 		DRM_ERROR("%p bind failed\n", rbo);
6942 		return r;
6943 	}
6944 
6945 	ttm_eu_backoff_reservation(&ticket, &list);
6946 
6947 	afb->address = amdgpu_bo_gpu_offset(rbo);
6948 
6949 	amdgpu_bo_ref(rbo);
6950 
6951 	/**
6952 	 * We don't do surface updates on planes that have been newly created,
6953 	 * but we also don't have the afb->address during atomic check.
6954 	 *
6955 	 * Fill in buffer attributes depending on the address here, but only on
6956 	 * newly created planes since they're not being used by DC yet and this
6957 	 * won't modify global state.
6958 	 */
6959 	dm_plane_state_old = to_dm_plane_state(plane->state);
6960 	dm_plane_state_new = to_dm_plane_state(new_state);
6961 
6962 	if (dm_plane_state_new->dc_state &&
6963 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6964 		struct dc_plane_state *plane_state =
6965 			dm_plane_state_new->dc_state;
6966 		bool force_disable_dcc = !plane_state->dcc.enable;
6967 
6968 		fill_plane_buffer_attributes(
6969 			adev, afb, plane_state->format, plane_state->rotation,
6970 			afb->tiling_flags,
6971 			&plane_state->tiling_info, &plane_state->plane_size,
6972 			&plane_state->dcc, &plane_state->address,
6973 			afb->tmz_surface, force_disable_dcc);
6974 	}
6975 
6976 	return 0;
6977 }
6978 
6979 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6980 				       struct drm_plane_state *old_state)
6981 {
6982 	struct amdgpu_bo *rbo;
6983 	int r;
6984 
6985 	if (!old_state->fb)
6986 		return;
6987 
6988 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6989 	r = amdgpu_bo_reserve(rbo, false);
6990 	if (unlikely(r)) {
6991 		DRM_ERROR("failed to reserve rbo before unpin\n");
6992 		return;
6993 	}
6994 
6995 	amdgpu_bo_unpin(rbo);
6996 	amdgpu_bo_unreserve(rbo);
6997 	amdgpu_bo_unref(&rbo);
6998 }
6999 
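/*
 * Scaling limits are converted from DC to drm conventions below; e.g. a
 * DC max_upscale of 16000 (16x, given DC's 1.0 == 1000) becomes a drm
 * min_scale of (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 fixed point.
 */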
7000 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7001 				       struct drm_crtc_state *new_crtc_state)
7002 {
7003 	struct drm_framebuffer *fb = state->fb;
7004 	int min_downscale, max_upscale;
7005 	int min_scale = 0;
7006 	int max_scale = INT_MAX;
7007 
7008 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7009 	if (fb && state->crtc) {
7010 		/* Validate viewport to cover the case when only the position changes */
7011 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7012 			int viewport_width = state->crtc_w;
7013 			int viewport_height = state->crtc_h;
7014 
7015 			if (state->crtc_x < 0)
7016 				viewport_width += state->crtc_x;
7017 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7018 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7019 
7020 			if (state->crtc_y < 0)
7021 				viewport_height += state->crtc_y;
7022 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7023 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7024 
7025 			if (viewport_width < 0 || viewport_height < 0) {
7026 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7027 				return -EINVAL;
			} else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* 2x width due to pipe split */
				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7030 				return -EINVAL;
7031 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7032 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7033 				return -EINVAL;
7034 			}
7035 
7036 		}
7037 
7038 		/* Get min/max allowed scaling factors from plane caps. */
7039 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7040 					     &min_downscale, &max_upscale);
7041 		/*
7042 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7043 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7044 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7045 		 */
7046 		min_scale = (1000 << 16) / max_upscale;
7047 		max_scale = (1000 << 16) / min_downscale;
7048 	}
7049 
7050 	return drm_atomic_helper_check_plane_state(
7051 		state, new_crtc_state, min_scale, max_scale, true, true);
7052 }
7053 
7054 static int dm_plane_atomic_check(struct drm_plane *plane,
7055 				 struct drm_atomic_state *state)
7056 {
7057 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7058 										 plane);
7059 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7060 	struct dc *dc = adev->dm.dc;
7061 	struct dm_plane_state *dm_plane_state;
7062 	struct dc_scaling_info scaling_info;
7063 	struct drm_crtc_state *new_crtc_state;
7064 	int ret;
7065 
7066 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7067 
7068 	dm_plane_state = to_dm_plane_state(new_plane_state);
7069 
7070 	if (!dm_plane_state->dc_state)
7071 		return 0;
7072 
7073 	new_crtc_state =
7074 		drm_atomic_get_new_crtc_state(state,
7075 					      new_plane_state->crtc);
7076 	if (!new_crtc_state)
7077 		return -EINVAL;
7078 
7079 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7080 	if (ret)
7081 		return ret;
7082 
7083 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7084 	if (ret)
7085 		return ret;
7086 
7087 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7088 		return 0;
7089 
7090 	return -EINVAL;
7091 }
7092 
7093 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7094 				       struct drm_atomic_state *state)
7095 {
7096 	/* Only support async updates on cursor planes. */
7097 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7098 		return -EINVAL;
7099 
7100 	return 0;
7101 }
7102 
7103 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7104 					 struct drm_atomic_state *state)
7105 {
7106 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7107 									   plane);
7108 	struct drm_plane_state *old_state =
7109 		drm_atomic_get_old_plane_state(state, plane);
7110 
7111 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7112 
7113 	swap(plane->state->fb, new_state->fb);
7114 
7115 	plane->state->src_x = new_state->src_x;
7116 	plane->state->src_y = new_state->src_y;
7117 	plane->state->src_w = new_state->src_w;
7118 	plane->state->src_h = new_state->src_h;
7119 	plane->state->crtc_x = new_state->crtc_x;
7120 	plane->state->crtc_y = new_state->crtc_y;
7121 	plane->state->crtc_w = new_state->crtc_w;
7122 	plane->state->crtc_h = new_state->crtc_h;
7123 
7124 	handle_cursor_update(plane, old_state);
7125 }
7126 
7127 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7128 	.prepare_fb = dm_plane_helper_prepare_fb,
7129 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7130 	.atomic_check = dm_plane_atomic_check,
7131 	.atomic_async_check = dm_plane_atomic_async_check,
7132 	.atomic_async_update = dm_plane_atomic_async_update
7133 };
7134 
7135 /*
7136  * TODO: these are currently initialized to rgb formats only.
7137  * For future use cases we should either initialize them dynamically based on
7138  * plane capabilities, or initialize this array to all formats, so internal drm
7139  * check will succeed, and let DC implement proper check
7140  */
7141 static const uint32_t rgb_formats[] = {
7142 	DRM_FORMAT_XRGB8888,
7143 	DRM_FORMAT_ARGB8888,
7144 	DRM_FORMAT_RGBA8888,
7145 	DRM_FORMAT_XRGB2101010,
7146 	DRM_FORMAT_XBGR2101010,
7147 	DRM_FORMAT_ARGB2101010,
7148 	DRM_FORMAT_ABGR2101010,
7149 	DRM_FORMAT_XRGB16161616,
7150 	DRM_FORMAT_XBGR16161616,
7151 	DRM_FORMAT_ARGB16161616,
7152 	DRM_FORMAT_ABGR16161616,
7153 	DRM_FORMAT_XBGR8888,
7154 	DRM_FORMAT_ABGR8888,
7155 	DRM_FORMAT_RGB565,
7156 };
7157 
7158 static const uint32_t overlay_formats[] = {
7159 	DRM_FORMAT_XRGB8888,
7160 	DRM_FORMAT_ARGB8888,
7161 	DRM_FORMAT_RGBA8888,
7162 	DRM_FORMAT_XBGR8888,
7163 	DRM_FORMAT_ABGR8888,
7164 	DRM_FORMAT_RGB565
7165 };
7166 
7167 static const u32 cursor_formats[] = {
7168 	DRM_FORMAT_ARGB8888
7169 };
7170 
7171 static int get_plane_formats(const struct drm_plane *plane,
7172 			     const struct dc_plane_cap *plane_cap,
7173 			     uint32_t *formats, int max_formats)
7174 {
7175 	int i, num_formats = 0;
7176 
7177 	/*
7178 	 * TODO: Query support for each group of formats directly from
7179 	 * DC plane caps. This will require adding more formats to the
7180 	 * caps list.
7181 	 */
7182 
7183 	switch (plane->type) {
7184 	case DRM_PLANE_TYPE_PRIMARY:
7185 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7186 			if (num_formats >= max_formats)
7187 				break;
7188 
7189 			formats[num_formats++] = rgb_formats[i];
7190 		}
7191 
7192 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7193 			formats[num_formats++] = DRM_FORMAT_NV12;
7194 		if (plane_cap && plane_cap->pixel_format_support.p010)
7195 			formats[num_formats++] = DRM_FORMAT_P010;
7196 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7197 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7198 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7199 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7200 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7201 		}
7202 		break;
7203 
7204 	case DRM_PLANE_TYPE_OVERLAY:
7205 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7206 			if (num_formats >= max_formats)
7207 				break;
7208 
7209 			formats[num_formats++] = overlay_formats[i];
7210 		}
7211 		break;
7212 
7213 	case DRM_PLANE_TYPE_CURSOR:
7214 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7215 			if (num_formats >= max_formats)
7216 				break;
7217 
7218 			formats[num_formats++] = cursor_formats[i];
7219 		}
7220 		break;
7221 	}
7222 
7223 	return num_formats;
7224 }
7225 
7226 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7227 				struct drm_plane *plane,
7228 				unsigned long possible_crtcs,
7229 				const struct dc_plane_cap *plane_cap)
7230 {
7231 	uint32_t formats[32];
7232 	int num_formats;
7233 	int res = -EPERM;
7234 	unsigned int supported_rotations;
7235 	uint64_t *modifiers = NULL;
7236 
7237 	num_formats = get_plane_formats(plane, plane_cap, formats,
7238 					ARRAY_SIZE(formats));
7239 
7240 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7241 	if (res)
7242 		return res;
7243 
7244 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7245 				       &dm_plane_funcs, formats, num_formats,
7246 				       modifiers, plane->type, NULL);
7247 	kfree(modifiers);
7248 	if (res)
7249 		return res;
7250 
7251 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7252 	    plane_cap && plane_cap->per_pixel_alpha) {
7253 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7254 					  BIT(DRM_MODE_BLEND_PREMULTI);
7255 
7256 		drm_plane_create_alpha_property(plane);
7257 		drm_plane_create_blend_mode_property(plane, blend_caps);
7258 	}
7259 
7260 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7261 	    plane_cap &&
7262 	    (plane_cap->pixel_format_support.nv12 ||
7263 	     plane_cap->pixel_format_support.p010)) {
7264 		/* This only affects YUV formats. */
7265 		drm_plane_create_color_properties(
7266 			plane,
7267 			BIT(DRM_COLOR_YCBCR_BT601) |
7268 			BIT(DRM_COLOR_YCBCR_BT709) |
7269 			BIT(DRM_COLOR_YCBCR_BT2020),
7270 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7271 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7272 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7273 	}
7274 
7275 	supported_rotations =
7276 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7277 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7278 
7279 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7280 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7281 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7282 						   supported_rotations);
7283 
7284 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7285 
7286 	/* Create (reset) the plane state */
7287 	if (plane->funcs->reset)
7288 		plane->funcs->reset(plane);
7289 
7290 	return 0;
7291 }
7292 
7293 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7294 			       struct drm_plane *plane,
7295 			       uint32_t crtc_index)
7296 {
7297 	struct amdgpu_crtc *acrtc = NULL;
7298 	struct drm_plane *cursor_plane;
7299 
7300 	int res = -ENOMEM;
7301 
7302 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7303 	if (!cursor_plane)
7304 		goto fail;
7305 
	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7308 
7309 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7310 	if (!acrtc)
7311 		goto fail;
7312 
7313 	res = drm_crtc_init_with_planes(
7314 			dm->ddev,
7315 			&acrtc->base,
7316 			plane,
7317 			cursor_plane,
7318 			&amdgpu_dm_crtc_funcs, NULL);
7319 
7320 	if (res)
7321 		goto fail;
7322 
7323 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7324 
7325 	/* Create (reset) the plane state */
7326 	if (acrtc->base.funcs->reset)
7327 		acrtc->base.funcs->reset(&acrtc->base);
7328 
7329 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7330 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7331 
7332 	acrtc->crtc_id = crtc_index;
7333 	acrtc->base.enabled = false;
7334 	acrtc->otg_inst = -1;
7335 
7336 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7337 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7338 				   true, MAX_COLOR_LUT_ENTRIES);
7339 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7340 
7341 	return 0;
7342 
7343 fail:
7344 	kfree(acrtc);
7345 	kfree(cursor_plane);
7346 	return res;
7347 }
7348 
7349 
7350 static int to_drm_connector_type(enum signal_type st)
7351 {
7352 	switch (st) {
7353 	case SIGNAL_TYPE_HDMI_TYPE_A:
7354 		return DRM_MODE_CONNECTOR_HDMIA;
7355 	case SIGNAL_TYPE_EDP:
7356 		return DRM_MODE_CONNECTOR_eDP;
7357 	case SIGNAL_TYPE_LVDS:
7358 		return DRM_MODE_CONNECTOR_LVDS;
7359 	case SIGNAL_TYPE_RGB:
7360 		return DRM_MODE_CONNECTOR_VGA;
7361 	case SIGNAL_TYPE_DISPLAY_PORT:
7362 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7363 		return DRM_MODE_CONNECTOR_DisplayPort;
7364 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7365 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7366 		return DRM_MODE_CONNECTOR_DVID;
7367 	case SIGNAL_TYPE_VIRTUAL:
7368 		return DRM_MODE_CONNECTOR_VIRTUAL;
7369 
7370 	default:
7371 		return DRM_MODE_CONNECTOR_Unknown;
7372 	}
7373 }
7374 
7375 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7376 {
7377 	struct drm_encoder *encoder;
7378 
7379 	/* There is only one encoder per connector */
7380 	drm_connector_for_each_possible_encoder(connector, encoder)
7381 		return encoder;
7382 
7383 	return NULL;
7384 }
7385 
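/*
 * Pick the native mode from the probed list. Only the first entry is
 * examined, which relies on the caller having sorted the list with
 * drm_mode_sort() so that preferred modes come first.
 */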
7386 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7387 {
7388 	struct drm_encoder *encoder;
7389 	struct amdgpu_encoder *amdgpu_encoder;
7390 
7391 	encoder = amdgpu_dm_connector_to_encoder(connector);
7392 
7393 	if (encoder == NULL)
7394 		return;
7395 
7396 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7397 
7398 	amdgpu_encoder->native_mode.clock = 0;
7399 
7400 	if (!list_empty(&connector->probed_modes)) {
7401 		struct drm_display_mode *preferred_mode = NULL;
7402 
7403 		list_for_each_entry(preferred_mode,
7404 				    &connector->probed_modes,
7405 				    head) {
7406 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7407 				amdgpu_encoder->native_mode = *preferred_mode;
7408 
7409 			break;
7410 		}
7411 
7412 	}
7413 }
7414 
7415 static struct drm_display_mode *
7416 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7417 			     char *name,
7418 			     int hdisplay, int vdisplay)
7419 {
7420 	struct drm_device *dev = encoder->dev;
7421 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7422 	struct drm_display_mode *mode = NULL;
7423 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7424 
7425 	mode = drm_mode_duplicate(dev, native_mode);
7426 
7427 	if (mode == NULL)
7428 		return NULL;
7429 
7430 	mode->hdisplay = hdisplay;
7431 	mode->vdisplay = vdisplay;
7432 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7433 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7434 
	return mode;
}
7438 
7439 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7440 						 struct drm_connector *connector)
7441 {
7442 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7443 	struct drm_display_mode *mode = NULL;
7444 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7445 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7446 				to_amdgpu_dm_connector(connector);
7447 	int i;
7448 	int n;
7449 	struct mode_size {
7450 		char name[DRM_DISPLAY_MODE_LEN];
7451 		int w;
7452 		int h;
7453 	} common_modes[] = {
7454 		{  "640x480",  640,  480},
7455 		{  "800x600",  800,  600},
7456 		{ "1024x768", 1024,  768},
7457 		{ "1280x720", 1280,  720},
7458 		{ "1280x800", 1280,  800},
7459 		{"1280x1024", 1280, 1024},
7460 		{ "1440x900", 1440,  900},
7461 		{"1680x1050", 1680, 1050},
7462 		{"1600x1200", 1600, 1200},
7463 		{"1920x1080", 1920, 1080},
7464 		{"1920x1200", 1920, 1200}
7465 	};
7466 
7467 	n = ARRAY_SIZE(common_modes);
7468 
7469 	for (i = 0; i < n; i++) {
7470 		struct drm_display_mode *curmode = NULL;
7471 		bool mode_existed = false;
7472 
7473 		if (common_modes[i].w > native_mode->hdisplay ||
7474 		    common_modes[i].h > native_mode->vdisplay ||
7475 		   (common_modes[i].w == native_mode->hdisplay &&
7476 		    common_modes[i].h == native_mode->vdisplay))
7477 			continue;
7478 
7479 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7480 			if (common_modes[i].w == curmode->hdisplay &&
7481 			    common_modes[i].h == curmode->vdisplay) {
7482 				mode_existed = true;
7483 				break;
7484 			}
7485 		}
7486 
7487 		if (mode_existed)
7488 			continue;
7489 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
7495 	}
7496 }
7497 
7498 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7499 					      struct edid *edid)
7500 {
7501 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7502 			to_amdgpu_dm_connector(connector);
7503 
7504 	if (edid) {
7505 		/* empty probed_modes */
7506 		INIT_LIST_HEAD(&connector->probed_modes);
7507 		amdgpu_dm_connector->num_modes =
7508 				drm_add_edid_modes(connector, edid);
7509 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. A mode later in the probed list
		 * may have a higher preferred resolution, e.g. a 3840x2160
		 * preferred timing in the base EDID and a 4096x2160 preferred
		 * resolution in a later DID extension block.
		 */
7518 		drm_mode_sort(&connector->probed_modes);
7519 		amdgpu_dm_get_native_mode(connector);
7520 
		/*
		 * Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be restored here.
		 */
7525 		amdgpu_dm_update_freesync_caps(connector, edid);
7526 	} else {
7527 		amdgpu_dm_connector->num_modes = 0;
7528 	}
7529 }
7530 
7531 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7532 			      struct drm_display_mode *mode)
7533 {
7534 	struct drm_display_mode *m;
7535 
7536 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7537 		if (drm_mode_equal(m, mode))
7538 			return true;
7539 	}
7540 
7541 	return false;
7542 }
7543 
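/*
 * Synthesize fixed-refresh variants of the highest-refresh mode by
 * stretching the vertical blank: for a target rate r (in mHz) the new
 * vtotal is clock * 1000 * 1000 / (r * htotal). For example, a 148500 kHz
 * mode with htotal 2200 retargeted to 48 Hz needs a vtotal of roughly 1406.
 */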
7544 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7545 {
7546 	const struct drm_display_mode *m;
7547 	struct drm_display_mode *new_mode;
7548 	uint i;
7549 	uint32_t new_modes_count = 0;
7550 
	/* Standard FPS values
	 *
	 * 23.976   - TV/NTSC
	 * 24       - Cinema
	 * 25       - TV/PAL
	 * 29.97    - TV/NTSC
	 * 30       - TV/NTSC
	 * 48       - Cinema HFR
	 * 50       - TV/PAL
	 * 60       - Commonly used
	 * 48,72,96 - Multiples of 24
	 */
7563 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7564 					 48000, 50000, 60000, 72000, 96000 };
7565 
7566 	/*
7567 	 * Find mode with highest refresh rate with the same resolution
7568 	 * as the preferred mode. Some monitors report a preferred mode
7569 	 * with lower resolution than the highest refresh rate supported.
7570 	 */
7571 
7572 	m = get_highest_refresh_rate_mode(aconnector, true);
7573 	if (!m)
7574 		return 0;
7575 
7576 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7577 		uint64_t target_vtotal, target_vtotal_diff;
7578 		uint64_t num, den;
7579 
7580 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7581 			continue;
7582 
7583 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7584 		    common_rates[i] > aconnector->max_vfreq * 1000)
7585 			continue;
7586 
7587 		num = (unsigned long long)m->clock * 1000 * 1000;
7588 		den = common_rates[i] * (unsigned long long)m->htotal;
7589 		target_vtotal = div_u64(num, den);
7590 		target_vtotal_diff = target_vtotal - m->vtotal;
7591 
7592 		/* Check for illegal modes */
7593 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7594 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7595 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7596 			continue;
7597 
7598 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7599 		if (!new_mode)
7600 			goto out;
7601 
7602 		new_mode->vtotal += (u16)target_vtotal_diff;
7603 		new_mode->vsync_start += (u16)target_vtotal_diff;
7604 		new_mode->vsync_end += (u16)target_vtotal_diff;
7605 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7606 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7607 
		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7613 	}
7614  out:
7615 	return new_modes_count;
7616 }
7617 
7618 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7619 						   struct edid *edid)
7620 {
7621 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7622 		to_amdgpu_dm_connector(connector);
7623 
7624 	if (!(amdgpu_freesync_vid_mode && edid))
7625 		return;
7626 
7627 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7628 		amdgpu_dm_connector->num_modes +=
7629 			add_fs_modes(amdgpu_dm_connector);
7630 }
7631 
7632 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7633 {
7634 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7635 			to_amdgpu_dm_connector(connector);
7636 	struct drm_encoder *encoder;
7637 	struct edid *edid = amdgpu_dm_connector->edid;
7638 
7639 	encoder = amdgpu_dm_connector_to_encoder(connector);
7640 
7641 	if (!drm_edid_is_valid(edid)) {
7642 		amdgpu_dm_connector->num_modes =
7643 				drm_add_modes_noedid(connector, 640, 480);
7644 	} else {
7645 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7646 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7647 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7648 	}
7649 	amdgpu_dm_fbc_init(connector);
7650 
7651 	return amdgpu_dm_connector->num_modes;
7652 }
7653 
7654 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7655 				     struct amdgpu_dm_connector *aconnector,
7656 				     int connector_type,
7657 				     struct dc_link *link,
7658 				     int link_index)
7659 {
7660 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7661 
7662 	/*
7663 	 * Some of the properties below require access to state, like bpc.
7664 	 * Allocate some default initial connector state with our reset helper.
7665 	 */
7666 	if (aconnector->base.funcs->reset)
7667 		aconnector->base.funcs->reset(&aconnector->base);
7668 
7669 	aconnector->connector_id = link_index;
7670 	aconnector->dc_link = link;
7671 	aconnector->base.interlace_allowed = false;
7672 	aconnector->base.doublescan_allowed = false;
7673 	aconnector->base.stereo_allowed = false;
7674 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7675 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7676 	aconnector->audio_inst = -1;
7677 	mutex_init(&aconnector->hpd_lock);
7678 
7679 	/*
7680 	 * configure support HPD hot plug connector_>polled default value is 0
7681 	 * which means HPD hot plug not supported
7682 	 */
7683 	switch (connector_type) {
7684 	case DRM_MODE_CONNECTOR_HDMIA:
7685 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
7688 		break;
7689 	case DRM_MODE_CONNECTOR_DisplayPort:
7690 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
7693 		break;
7694 	case DRM_MODE_CONNECTOR_DVID:
7695 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7696 		break;
7697 	default:
7698 		break;
7699 	}
7700 
7701 	drm_object_attach_property(&aconnector->base.base,
7702 				dm->ddev->mode_config.scaling_mode_property,
7703 				DRM_MODE_SCALE_NONE);
7704 
7705 	drm_object_attach_property(&aconnector->base.base,
7706 				adev->mode_info.underscan_property,
7707 				UNDERSCAN_OFF);
7708 	drm_object_attach_property(&aconnector->base.base,
7709 				adev->mode_info.underscan_hborder_property,
7710 				0);
7711 	drm_object_attach_property(&aconnector->base.base,
7712 				adev->mode_info.underscan_vborder_property,
7713 				0);
7714 
7715 	if (!aconnector->mst_port)
7716 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7717 
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
7719 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7720 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7721 
7722 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7723 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7724 		drm_object_attach_property(&aconnector->base.base,
7725 				adev->mode_info.abm_level_property, 0);
7726 	}
7727 
7728 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7729 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7730 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7731 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7732 
7733 		if (!aconnector->mst_port)
7734 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7735 
7736 #ifdef CONFIG_DRM_AMD_DC_HDCP
7737 		if (adev->dm.hdcp_workqueue)
7738 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7739 #endif
7740 	}
7741 }
7742 
7743 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7744 			      struct i2c_msg *msgs, int num)
7745 {
7746 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7747 	struct ddc_service *ddc_service = i2c->ddc_service;
7748 	struct i2c_command cmd;
7749 	int i;
7750 	int result = -EIO;
7751 
7752 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7753 
7754 	if (!cmd.payloads)
7755 		return result;
7756 
7757 	cmd.number_of_payloads = num;
7758 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7759 	cmd.speed = 100;
7760 
7761 	for (i = 0; i < num; i++) {
7762 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7763 		cmd.payloads[i].address = msgs[i].addr;
7764 		cmd.payloads[i].length = msgs[i].len;
7765 		cmd.payloads[i].data = msgs[i].buf;
7766 	}
7767 
7768 	if (dc_submit_i2c(
7769 			ddc_service->ctx->dc,
7770 			ddc_service->ddc_pin->hw_info.ddc_channel,
7771 			&cmd))
7772 		result = num;
7773 
7774 	kfree(cmd.payloads);
7775 	return result;
7776 }
7777 
7778 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7779 {
7780 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7781 }
7782 
7783 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7784 	.master_xfer = amdgpu_dm_i2c_xfer,
7785 	.functionality = amdgpu_dm_i2c_func,
7786 };
7787 
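/*
 * Wrap a DC DDC service in a Linux i2c_adapter so that EDID reads and
 * other i2c traffic on the connector are routed through dc_submit_i2c().
 */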
7788 static struct amdgpu_i2c_adapter *
7789 create_i2c(struct ddc_service *ddc_service,
7790 	   int link_index,
7791 	   int *res)
7792 {
7793 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7794 	struct amdgpu_i2c_adapter *i2c;
7795 
7796 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7797 	if (!i2c)
7798 		return NULL;
7799 	i2c->base.owner = THIS_MODULE;
7800 	i2c->base.class = I2C_CLASS_DDC;
7801 	i2c->base.dev.parent = &adev->pdev->dev;
7802 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7803 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7804 	i2c_set_adapdata(&i2c->base, i2c);
7805 	i2c->ddc_service = ddc_service;
7806 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7807 
7808 	return i2c;
7809 }
7810 
7811 
7812 /*
7813  * Note: this function assumes that dc_link_detect() was called for the
7814  * dc_link which will be represented by this aconnector.
7815  */
7816 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7817 				    struct amdgpu_dm_connector *aconnector,
7818 				    uint32_t link_index,
7819 				    struct amdgpu_encoder *aencoder)
7820 {
7821 	int res = 0;
7822 	int connector_type;
7823 	struct dc *dc = dm->dc;
7824 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7825 	struct amdgpu_i2c_adapter *i2c;
7826 
7827 	link->priv = aconnector;
7828 
7829 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7830 
7831 	i2c = create_i2c(link->ddc, link->link_index, &res);
7832 	if (!i2c) {
7833 		DRM_ERROR("Failed to create i2c adapter data\n");
7834 		return -ENOMEM;
7835 	}
7836 
7837 	aconnector->i2c = i2c;
7838 	res = i2c_add_adapter(&i2c->base);
7839 
7840 	if (res) {
7841 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7842 		goto out_free;
7843 	}
7844 
7845 	connector_type = to_drm_connector_type(link->connector_signal);
7846 
7847 	res = drm_connector_init_with_ddc(
7848 			dm->ddev,
7849 			&aconnector->base,
7850 			&amdgpu_dm_connector_funcs,
7851 			connector_type,
7852 			&i2c->base);
7853 
7854 	if (res) {
7855 		DRM_ERROR("connector_init failed\n");
7856 		aconnector->connector_id = -1;
7857 		goto out_free;
7858 	}
7859 
7860 	drm_connector_helper_add(
7861 			&aconnector->base,
7862 			&amdgpu_dm_connector_helper_funcs);
7863 
7864 	amdgpu_dm_connector_init_helper(
7865 		dm,
7866 		aconnector,
7867 		connector_type,
7868 		link,
7869 		link_index);
7870 
7871 	drm_connector_attach_encoder(
7872 		&aconnector->base, &aencoder->base);
7873 
7874 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7875 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7876 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7877 
7878 out_free:
7879 	if (res) {
7880 		kfree(i2c);
7881 		aconnector->i2c = NULL;
7882 	}
7883 	return res;
7884 }
7885 
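/*
 * Any encoder can drive any CRTC, so possible_crtcs is just a mask with
 * one bit per CRTC, i.e. (1 << num_crtc) - 1, capped at six CRTCs.
 */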
7886 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7887 {
7888 	switch (adev->mode_info.num_crtc) {
7889 	case 1:
7890 		return 0x1;
7891 	case 2:
7892 		return 0x3;
7893 	case 3:
7894 		return 0x7;
7895 	case 4:
7896 		return 0xf;
7897 	case 5:
7898 		return 0x1f;
7899 	case 6:
7900 	default:
7901 		return 0x3f;
7902 	}
7903 }
7904 
7905 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7906 				  struct amdgpu_encoder *aencoder,
7907 				  uint32_t link_index)
7908 {
7909 	struct amdgpu_device *adev = drm_to_adev(dev);
7910 
7911 	int res = drm_encoder_init(dev,
7912 				   &aencoder->base,
7913 				   &amdgpu_dm_encoder_funcs,
7914 				   DRM_MODE_ENCODER_TMDS,
7915 				   NULL);
7916 
7917 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7918 
7919 	if (!res)
7920 		aencoder->encoder_id = link_index;
7921 	else
7922 		aencoder->encoder_id = -1;
7923 
7924 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7925 
7926 	return res;
7927 }
7928 
7929 static void manage_dm_interrupts(struct amdgpu_device *adev,
7930 				 struct amdgpu_crtc *acrtc,
7931 				 bool enable)
7932 {
7933 	/*
7934 	 * We have no guarantee that the frontend index maps to the same
7935 	 * backend index - some even map to more than one.
7936 	 *
7937 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7938 	 */
7939 	int irq_type =
7940 		amdgpu_display_crtc_idx_to_irq_type(
7941 			adev,
7942 			acrtc->crtc_id);
7943 
7944 	if (enable) {
7945 		drm_crtc_vblank_on(&acrtc->base);
7946 		amdgpu_irq_get(
7947 			adev,
7948 			&adev->pageflip_irq,
7949 			irq_type);
7950 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7951 		amdgpu_irq_get(
7952 			adev,
7953 			&adev->vline0_irq,
7954 			irq_type);
7955 #endif
7956 	} else {
7957 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7958 		amdgpu_irq_put(
7959 			adev,
7960 			&adev->vline0_irq,
7961 			irq_type);
7962 #endif
7963 		amdgpu_irq_put(
7964 			adev,
7965 			&adev->pageflip_irq,
7966 			irq_type);
7967 		drm_crtc_vblank_off(&acrtc->base);
7968 	}
7969 }
7970 
7971 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7972 				      struct amdgpu_crtc *acrtc)
7973 {
7974 	int irq_type =
7975 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7976 
7977 	/**
7978 	 * This reads the current state for the IRQ and force reapplies
7979 	 * the setting to hardware.
7980 	 */
7981 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7982 }
7983 
7984 static bool
7985 is_scaling_state_different(const struct dm_connector_state *dm_state,
7986 			   const struct dm_connector_state *old_dm_state)
7987 {
7988 	if (dm_state->scaling != old_dm_state->scaling)
7989 		return true;
7990 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7991 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7992 			return true;
7993 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7994 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7995 			return true;
7996 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7997 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7998 		return true;
7999 	return false;
8000 }
8001 
8002 #ifdef CONFIG_DRM_AMD_DC_HDCP
8003 static bool is_content_protection_different(struct drm_connector_state *state,
8004 					    const struct drm_connector_state *old_state,
8005 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8006 {
8007 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8008 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8009 
8010 	/* Handle: Type0/1 change */
8011 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8012 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8013 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8014 		return true;
8015 	}
8016 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
8021 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8022 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8023 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8024 		return false;
8025 	}
8026 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
	 * restored state will be ENABLED.
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
8031 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8032 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8033 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8034 
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled. Covers hot plug, headless S3
	 * and DPMS.
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
8040 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8041 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8042 		dm_con_state->update_hdcp = false;
8043 		return true;
8044 	}
8045 
8046 	/*
8047 	 * Handles:	UNDESIRED -> UNDESIRED
8048 	 *		DESIRED -> DESIRED
8049 	 *		ENABLED -> ENABLED
8050 	 */
8051 	if (old_state->content_protection == state->content_protection)
8052 		return false;
8053 
8054 	/*
8055 	 * Handles:	UNDESIRED -> DESIRED
8056 	 *		DESIRED -> UNDESIRED
8057 	 *		ENABLED -> UNDESIRED
8058 	 */
8059 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8060 		return true;
8061 
8062 	/*
8063 	 * Handles:	DESIRED -> ENABLED
8064 	 */
8065 	return false;
8066 }
8067 
8068 #endif
8069 static void remove_stream(struct amdgpu_device *adev,
8070 			  struct amdgpu_crtc *acrtc,
8071 			  struct dc_stream_state *stream)
8072 {
8073 	/* this is the update mode case */
8074 
8075 	acrtc->otg_inst = -1;
8076 	acrtc->enabled = false;
8077 }
8078 
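/*
 * Translate the plane's cursor rectangle into a DC cursor position. If the
 * cursor hangs off the top or left edge, the position is clamped to 0 and
 * the overhang is folded into the hotspot; e.g. crtc_x = -10 becomes
 * x = 0 with x_hotspot = 10.
 */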
8079 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8080 			       struct dc_cursor_position *position)
8081 {
8082 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8083 	int x, y;
8084 	int xorigin = 0, yorigin = 0;
8085 
8086 	if (!crtc || !plane->state->fb)
8087 		return 0;
8088 
8089 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8090 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8091 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8092 			  __func__,
8093 			  plane->state->crtc_w,
8094 			  plane->state->crtc_h);
8095 		return -EINVAL;
8096 	}
8097 
8098 	x = plane->state->crtc_x;
8099 	y = plane->state->crtc_y;
8100 
8101 	if (x <= -amdgpu_crtc->max_cursor_width ||
8102 	    y <= -amdgpu_crtc->max_cursor_height)
8103 		return 0;
8104 
8105 	if (x < 0) {
8106 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8107 		x = 0;
8108 	}
8109 	if (y < 0) {
8110 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8111 		y = 0;
8112 	}
8113 	position->enable = true;
8114 	position->translate_by_source = true;
8115 	position->x = x;
8116 	position->y = y;
8117 	position->x_hotspot = xorigin;
8118 	position->y_hotspot = yorigin;
8119 
8120 	return 0;
8121 }
8122 
8123 static void handle_cursor_update(struct drm_plane *plane,
8124 				 struct drm_plane_state *old_plane_state)
8125 {
8126 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8127 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8128 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8129 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8130 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8131 	uint64_t address = afb ? afb->address : 0;
8132 	struct dc_cursor_position position = {0};
8133 	struct dc_cursor_attributes attributes;
8134 	int ret;
8135 
8136 	if (!plane->state->fb && !old_plane_state->fb)
8137 		return;
8138 
	DC_LOG_CURSOR("%s: crtc_id=%d with cursor size %dx%d\n",
8140 		      __func__,
8141 		      amdgpu_crtc->crtc_id,
8142 		      plane->state->crtc_w,
8143 		      plane->state->crtc_h);
8144 
8145 	ret = get_cursor_position(plane, crtc, &position);
8146 	if (ret)
8147 		return;
8148 
8149 	if (!position.enable) {
8150 		/* turn off cursor */
8151 		if (crtc_state && crtc_state->stream) {
8152 			mutex_lock(&adev->dm.dc_lock);
8153 			dc_stream_set_cursor_position(crtc_state->stream,
8154 						      &position);
8155 			mutex_unlock(&adev->dm.dc_lock);
8156 		}
8157 		return;
8158 	}
8159 
8160 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8161 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8162 
8163 	memset(&attributes, 0, sizeof(attributes));
8164 	attributes.address.high_part = upper_32_bits(address);
8165 	attributes.address.low_part  = lower_32_bits(address);
8166 	attributes.width             = plane->state->crtc_w;
8167 	attributes.height            = plane->state->crtc_h;
8168 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8169 	attributes.rotation_angle    = 0;
8170 	attributes.attribute_flags.value = 0;
8171 
8172 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8173 
8174 	if (crtc_state->stream) {
8175 		mutex_lock(&adev->dm.dc_lock);
8176 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8177 							 &attributes))
8178 			DRM_ERROR("DC failed to set cursor attributes\n");
8179 
8180 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8181 						   &position))
8182 			DRM_ERROR("DC failed to set cursor position\n");
8183 		mutex_unlock(&adev->dm.dc_lock);
8184 	}
8185 }
8186 
8187 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8188 {
8189 
8190 	assert_spin_locked(&acrtc->base.dev->event_lock);
8191 	WARN_ON(acrtc->event);
8192 
8193 	acrtc->event = acrtc->base.state->event;
8194 
8195 	/* Set the flip status */
8196 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8197 
8198 	/* Mark this event as consumed */
8199 	acrtc->base.state->event = NULL;
8200 
8201 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8202 		     acrtc->crtc_id);
8203 }
8204 
8205 static void update_freesync_state_on_stream(
8206 	struct amdgpu_display_manager *dm,
8207 	struct dm_crtc_state *new_crtc_state,
8208 	struct dc_stream_state *new_stream,
8209 	struct dc_plane_state *surface,
8210 	u32 flip_timestamp_in_us)
8211 {
8212 	struct mod_vrr_params vrr_params;
8213 	struct dc_info_packet vrr_infopacket = {0};
8214 	struct amdgpu_device *adev = dm->adev;
8215 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8216 	unsigned long flags;
8217 	bool pack_sdp_v1_3 = false;
8218 
8219 	if (!new_stream)
8220 		return;
8221 
8222 	/*
8223 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8224 	 * For now it's sufficient to just guard against these conditions.
8225 	 */
8226 
8227 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8228 		return;
8229 
8230 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8232 
8233 	if (surface) {
8234 		mod_freesync_handle_preflip(
8235 			dm->freesync_module,
8236 			surface,
8237 			new_stream,
8238 			flip_timestamp_in_us,
8239 			&vrr_params);
8240 
8241 		if (adev->family < AMDGPU_FAMILY_AI &&
8242 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8243 			mod_freesync_handle_v_update(dm->freesync_module,
8244 						     new_stream, &vrr_params);
8245 
8246 			/* Need to call this before the frame ends. */
8247 			dc_stream_adjust_vmin_vmax(dm->dc,
8248 						   new_crtc_state->stream,
8249 						   &vrr_params.adjust);
8250 		}
8251 	}
8252 
8253 	mod_freesync_build_vrr_infopacket(
8254 		dm->freesync_module,
8255 		new_stream,
8256 		&vrr_params,
8257 		PACKET_TYPE_VRR,
8258 		TRANSFER_FUNC_UNKNOWN,
8259 		&vrr_infopacket,
8260 		pack_sdp_v1_3);
8261 
8262 	new_crtc_state->freesync_timing_changed |=
8263 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8264 			&vrr_params.adjust,
8265 			sizeof(vrr_params.adjust)) != 0);
8266 
8267 	new_crtc_state->freesync_vrr_info_changed |=
8268 		(memcmp(&new_crtc_state->vrr_infopacket,
8269 			&vrr_infopacket,
8270 			sizeof(vrr_infopacket)) != 0);
8271 
8272 	acrtc->dm_irq_params.vrr_params = vrr_params;
8273 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8274 
8275 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8276 	new_stream->vrr_infopacket = vrr_infopacket;
8277 
8278 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8280 			      new_crtc_state->base.crtc->base.id,
8281 			      (int)new_crtc_state->base.vrr_enabled,
8282 			      (int)vrr_params.state);
8283 
8284 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8285 }
8286 
8287 static void update_stream_irq_parameters(
8288 	struct amdgpu_display_manager *dm,
8289 	struct dm_crtc_state *new_crtc_state)
8290 {
8291 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8292 	struct mod_vrr_params vrr_params;
8293 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8294 	struct amdgpu_device *adev = dm->adev;
8295 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8296 	unsigned long flags;
8297 
8298 	if (!new_stream)
8299 		return;
8300 
8301 	/*
8302 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8303 	 * For now it's sufficient to just guard against these conditions.
8304 	 */
8305 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8306 		return;
8307 
8308 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8309 	vrr_params = acrtc->dm_irq_params.vrr_params;
8310 
8311 	if (new_crtc_state->vrr_supported &&
8312 	    config.min_refresh_in_uhz &&
8313 	    config.max_refresh_in_uhz) {
8314 		/*
8315 		 * if freesync compatible mode was set, config.state will be set
8316 		 * in atomic check
8317 		 */
8318 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8319 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8320 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8321 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8322 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8323 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8324 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8325 		} else {
8326 			config.state = new_crtc_state->base.vrr_enabled ?
8327 						     VRR_STATE_ACTIVE_VARIABLE :
8328 						     VRR_STATE_INACTIVE;
8329 		}
8330 	} else {
8331 		config.state = VRR_STATE_UNSUPPORTED;
8332 	}
8333 
8334 	mod_freesync_build_vrr_params(dm->freesync_module,
8335 				      new_stream,
8336 				      &config, &vrr_params);
8337 
8338 	new_crtc_state->freesync_timing_changed |=
8339 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8340 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8341 
8342 	new_crtc_state->freesync_config = config;
8343 	/* Copy state for access from DM IRQ handler */
8344 	acrtc->dm_irq_params.freesync_config = config;
8345 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8346 	acrtc->dm_irq_params.vrr_params = vrr_params;
8347 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8348 }
8349 
8350 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8351 					    struct dm_crtc_state *new_state)
8352 {
8353 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8354 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8355 
8356 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if the re-enable happens inside the display
		 * front porch.
		 *
		 * We also need vupdate irq for the actual core vblank handling
		 * at end of vblank.
		 */
8365 		dm_set_vupdate_irq(new_state->base.crtc, true);
8366 		drm_crtc_vblank_get(new_state->base.crtc);
8367 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8368 				 __func__, new_state->base.crtc->base.id);
8369 	} else if (old_vrr_active && !new_vrr_active) {
8370 		/* Transition VRR active -> inactive:
8371 		 * Allow vblank irq disable again for fixed refresh rate.
8372 		 */
8373 		dm_set_vupdate_irq(new_state->base.crtc, false);
8374 		drm_crtc_vblank_put(new_state->base.crtc);
8375 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8376 				 __func__, new_state->base.crtc->base.id);
8377 	}
8378 }
8379 
8380 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8381 {
8382 	struct drm_plane *plane;
8383 	struct drm_plane_state *old_plane_state;
8384 	int i;
8385 
8386 	/*
8387 	 * TODO: Make this per-stream so we don't issue redundant updates for
8388 	 * commits with multiple streams.
8389 	 */
8390 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8391 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8392 			handle_cursor_update(plane, old_plane_state);
8393 }
8394 
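/*
 * Apply all plane updates for one CRTC as a single DC commit: build a
 * bundle of surface, scaling, plane-info and flip-address updates, wait on
 * the framebuffer fences, and refresh the stream's freesync state as the
 * primary plane flips.
 */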
8395 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8396 				    struct dc_state *dc_state,
8397 				    struct drm_device *dev,
8398 				    struct amdgpu_display_manager *dm,
8399 				    struct drm_crtc *pcrtc,
8400 				    bool wait_for_vblank)
8401 {
8402 	uint32_t i;
8403 	uint64_t timestamp_ns;
8404 	struct drm_plane *plane;
8405 	struct drm_plane_state *old_plane_state, *new_plane_state;
8406 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8407 	struct drm_crtc_state *new_pcrtc_state =
8408 			drm_atomic_get_new_crtc_state(state, pcrtc);
8409 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8410 	struct dm_crtc_state *dm_old_crtc_state =
8411 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8412 	int planes_count = 0, vpos, hpos;
8413 	long r;
8414 	unsigned long flags;
8415 	struct amdgpu_bo *abo;
8416 	uint32_t target_vblank, last_flip_vblank;
8417 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8418 	bool pflip_present = false;
8419 	struct {
8420 		struct dc_surface_update surface_updates[MAX_SURFACES];
8421 		struct dc_plane_info plane_infos[MAX_SURFACES];
8422 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8423 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8424 		struct dc_stream_update stream_update;
8425 	} *bundle;
8426 
8427 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8428 
8429 	if (!bundle) {
8430 		dm_error("Failed to allocate update bundle\n");
8431 		goto cleanup;
8432 	}
8433 
8434 	/*
8435 	 * Disable the cursor first if we're disabling all the planes.
8436 	 * It'll remain on the screen after the planes are re-enabled
8437 	 * if we don't.
8438 	 */
8439 	if (acrtc_state->active_planes == 0)
8440 		amdgpu_dm_commit_cursors(state);
8441 
8442 	/* update planes when needed */
8443 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8444 		struct drm_crtc *crtc = new_plane_state->crtc;
8445 		struct drm_crtc_state *new_crtc_state;
8446 		struct drm_framebuffer *fb = new_plane_state->fb;
8447 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8448 		bool plane_needs_flip;
8449 		struct dc_plane_state *dc_plane;
8450 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8451 
8452 		/* Cursor plane is handled after stream updates */
8453 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8454 			continue;
8455 
8456 		if (!fb || !crtc || pcrtc != crtc)
8457 			continue;
8458 
8459 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8460 		if (!new_crtc_state->active)
8461 			continue;
8462 
8463 		dc_plane = dm_new_plane_state->dc_state;
8464 
8465 		bundle->surface_updates[planes_count].surface = dc_plane;
8466 		if (new_pcrtc_state->color_mgmt_changed) {
8467 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8468 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8469 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8470 		}
8471 
8472 		fill_dc_scaling_info(new_plane_state,
8473 				     &bundle->scaling_infos[planes_count]);
8474 
8475 		bundle->surface_updates[planes_count].scaling_info =
8476 			&bundle->scaling_infos[planes_count];
8477 
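		/*
		 * Only treat this as a page flip when the plane had a
		 * framebuffer before and still has one now; plane enables
		 * and disables carry no flip address and are handled as
		 * plain surface updates below.
		 */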
8478 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8479 
8480 		pflip_present = pflip_present || plane_needs_flip;
8481 
8482 		if (!plane_needs_flip) {
8483 			planes_count += 1;
8484 			continue;
8485 		}
8486 
8487 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8488 
8489 		/*
8490 		 * Wait for all fences on this FB. Do limited wait to avoid
8491 		 * deadlock during GPU reset when this fence will not signal
8492 		 * but we hold reservation lock for the BO.
8493 		 */
8494 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8495 							false,
8496 							msecs_to_jiffies(5000));
8497 		if (unlikely(r <= 0))
8498 			DRM_ERROR("Waiting for fences timed out!");
8499 
8500 		fill_dc_plane_info_and_addr(
8501 			dm->adev, new_plane_state,
8502 			afb->tiling_flags,
8503 			&bundle->plane_infos[planes_count],
8504 			&bundle->flip_addrs[planes_count].address,
8505 			afb->tmz_surface, false);
8506 
8507 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8508 				 new_plane_state->plane->index,
8509 				 bundle->plane_infos[planes_count].dcc.enable);
8510 
8511 		bundle->surface_updates[planes_count].plane_info =
8512 			&bundle->plane_infos[planes_count];
8513 
8514 		/*
8515 		 * Only allow immediate flips for fast updates that don't
8516 		 * change FB pitch, DCC state, rotation or mirroing.
8517 		 */
8518 		bundle->flip_addrs[planes_count].flip_immediate =
8519 			crtc->state->async_flip &&
8520 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8521 
8522 		timestamp_ns = ktime_get_ns();
8523 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8524 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8525 		bundle->surface_updates[planes_count].surface = dc_plane;
8526 
8527 		if (!bundle->surface_updates[planes_count].surface) {
8528 			DRM_ERROR("No surface for CRTC: id=%d\n",
8529 					acrtc_attach->crtc_id);
8530 			continue;
8531 		}
8532 
8533 		if (plane == pcrtc->primary)
8534 			update_freesync_state_on_stream(
8535 				dm,
8536 				acrtc_state,
8537 				acrtc_state->stream,
8538 				dc_plane,
8539 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8540 
8541 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8542 				 __func__,
8543 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8544 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8545 
8546 		planes_count += 1;
8547 
8548 	}
8549 
8550 	if (pflip_present) {
8551 		if (!vrr_active) {
8552 			/* Use old throttling in non-vrr fixed refresh rate mode
8553 			 * to keep flip scheduling based on target vblank counts
8554 			 * working in a backwards compatible way, e.g., for
8555 			 * clients using the GLX_OML_sync_control extension or
8556 			 * DRI3/Present extension with defined target_msc.
8557 			 */
8558 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8559 		}
8560 		else {
8561 			/* For variable refresh rate mode only:
8562 			 * Get vblank of last completed flip to avoid > 1 vrr
8563 			 * flips per video frame by use of throttling, but allow
8564 			 * flip programming anywhere in the possibly large
8565 			 * variable vrr vblank interval for fine-grained flip
8566 			 * timing control and more opportunity to avoid stutter
8567 			 * on late submission of flips.
8568 			 */
8569 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8570 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8571 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8572 		}
8573 
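		/*
		 * wait_for_vblank is a bool, so the target is either the
		 * vblank of the last completed flip or the one right after it.
		 */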
8574 		target_vblank = last_flip_vblank + wait_for_vblank;
8575 
8576 		/*
8577 		 * Wait until we're out of the vertical blank period before the one
8578 		 * targeted by the flip
8579 		 */
8580 		while ((acrtc_attach->enabled &&
8581 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8582 							    0, &vpos, &hpos, NULL,
8583 							    NULL, &pcrtc->hwmode)
8584 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8585 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8586 			(int)(target_vblank -
8587 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8588 			usleep_range(1000, 1100);
8589 		}
8590 
8591 		/**
8592 		 * Prepare the flip event for the pageflip interrupt to handle.
8593 		 *
8594 		 * This only works in the case where we've already turned on the
8595 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8596 		 * from 0 -> n planes we have to skip a hardware generated event
8597 		 * and rely on sending it from software.
8598 		 */
8599 		if (acrtc_attach->base.state->event &&
8600 		    acrtc_state->active_planes > 0) {
8601 			drm_crtc_vblank_get(pcrtc);
8602 
8603 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8604 
8605 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8606 			prepare_flip_isr(acrtc_attach);
8607 
8608 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8609 		}
8610 
8611 		if (acrtc_state->stream) {
8612 			if (acrtc_state->freesync_vrr_info_changed)
8613 				bundle->stream_update.vrr_infopacket =
8614 					&acrtc_state->stream->vrr_infopacket;
8615 		}
8616 	}
8617 
8618 	/* Update the planes if changed or disable if we don't have any. */
8619 	if ((planes_count || acrtc_state->active_planes == 0) &&
8620 		acrtc_state->stream) {
8621 		bundle->stream_update.stream = acrtc_state->stream;
8622 		if (new_pcrtc_state->mode_changed) {
8623 			bundle->stream_update.src = acrtc_state->stream->src;
8624 			bundle->stream_update.dst = acrtc_state->stream->dst;
8625 		}
8626 
8627 		if (new_pcrtc_state->color_mgmt_changed) {
8628 			/*
8629 			 * TODO: This isn't fully correct since we've actually
8630 			 * already modified the stream in place.
8631 			 */
8632 			bundle->stream_update.gamut_remap =
8633 				&acrtc_state->stream->gamut_remap_matrix;
8634 			bundle->stream_update.output_csc_transform =
8635 				&acrtc_state->stream->csc_color_matrix;
8636 			bundle->stream_update.out_transfer_func =
8637 				acrtc_state->stream->out_transfer_func;
8638 		}
8639 
8640 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8641 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8642 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8643 
8644 		/*
8645 		 * If FreeSync state on the stream has changed then we need to
8646 		 * re-adjust the min/max bounds now that DC doesn't handle this
8647 		 * as part of commit.
8648 		 */
8649 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8650 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8651 			dc_stream_adjust_vmin_vmax(
8652 				dm->dc, acrtc_state->stream,
8653 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8654 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8655 		}
8656 		mutex_lock(&dm->dc_lock);
8657 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8658 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8659 			amdgpu_dm_psr_disable(acrtc_state->stream);
8660 
8661 		dc_commit_updates_for_stream(dm->dc,
8662 						     bundle->surface_updates,
8663 						     planes_count,
8664 						     acrtc_state->stream,
8665 						     &bundle->stream_update,
8666 						     dc_state);
8667 
8668 		/**
8669 		 * Enable or disable the interrupts on the backend.
8670 		 *
8671 		 * Most pipes are put into power gating when unused.
8672 		 *
8673 		 * When power gating is enabled on a pipe we lose the
8674 		 * interrupt enablement state when power gating is disabled.
8675 		 *
8676 		 * So we need to update the IRQ control state in hardware
8677 		 * whenever the pipe turns on (since it could be previously
8678 		 * power gated) or off (since some pipes can't be power gated
8679 		 * on some ASICs).
8680 		 */
8681 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8682 			dm_update_pflip_irq_state(drm_to_adev(dev),
8683 						  acrtc_attach);
8684 
8685 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8686 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8687 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8688 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8689 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8690 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8691 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8692 			amdgpu_dm_psr_enable(acrtc_state->stream);
8693 		}
8694 
8695 		mutex_unlock(&dm->dc_lock);
8696 	}
8697 
8698 	/*
8699 	 * Update cursor state *after* programming all the planes.
8700 	 * This avoids redundant programming in the case where we're going
8701 	 * to be disabling a single plane - those pipes are being disabled.
8702 	 */
8703 	if (acrtc_state->active_planes)
8704 		amdgpu_dm_commit_cursors(state);
8705 
8706 cleanup:
8707 	kfree(bundle);
8708 }
8709 
8710 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8711 				   struct drm_atomic_state *state)
8712 {
8713 	struct amdgpu_device *adev = drm_to_adev(dev);
8714 	struct amdgpu_dm_connector *aconnector;
8715 	struct drm_connector *connector;
8716 	struct drm_connector_state *old_con_state, *new_con_state;
8717 	struct drm_crtc_state *new_crtc_state;
8718 	struct dm_crtc_state *new_dm_crtc_state;
8719 	const struct dc_stream_status *status;
8720 	int i, inst;
8721 
8722 	/* Notify device removals. */
8723 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8724 		if (old_con_state->crtc != new_con_state->crtc) {
8725 			/* CRTC changes require notification. */
8726 			goto notify;
8727 		}
8728 
8729 		if (!new_con_state->crtc)
8730 			continue;
8731 
8732 		new_crtc_state = drm_atomic_get_new_crtc_state(
8733 			state, new_con_state->crtc);
8734 
8735 		if (!new_crtc_state)
8736 			continue;
8737 
8738 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8739 			continue;
8740 
8741 	notify:
8742 		aconnector = to_amdgpu_dm_connector(connector);
8743 
8744 		mutex_lock(&adev->dm.audio_lock);
8745 		inst = aconnector->audio_inst;
8746 		aconnector->audio_inst = -1;
8747 		mutex_unlock(&adev->dm.audio_lock);
8748 
8749 		amdgpu_dm_audio_eld_notify(adev, inst);
8750 	}
8751 
8752 	/* Notify audio device additions. */
8753 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8754 		if (!new_con_state->crtc)
8755 			continue;
8756 
8757 		new_crtc_state = drm_atomic_get_new_crtc_state(
8758 			state, new_con_state->crtc);
8759 
8760 		if (!new_crtc_state)
8761 			continue;
8762 
8763 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8764 			continue;
8765 
8766 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8767 		if (!new_dm_crtc_state->stream)
8768 			continue;
8769 
8770 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8771 		if (!status)
8772 			continue;
8773 
8774 		aconnector = to_amdgpu_dm_connector(connector);
8775 
8776 		mutex_lock(&adev->dm.audio_lock);
8777 		inst = status->audio_inst;
8778 		aconnector->audio_inst = inst;
8779 		mutex_unlock(&adev->dm.audio_lock);
8780 
8781 		amdgpu_dm_audio_eld_notify(adev, inst);
8782 	}
8783 }
8784 
8785 /*
8786  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8787  * @crtc_state: the DRM CRTC state
8788  * @stream_state: the DC stream state.
8789  *
8790  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8791  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8792  */
8793 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8794 						struct dc_stream_state *stream_state)
8795 {
8796 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8797 }
8798 
8799 /**
8800  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8801  * @state: The atomic state to commit
8802  *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
8806  */
8807 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8808 {
8809 	struct drm_device *dev = state->dev;
8810 	struct amdgpu_device *adev = drm_to_adev(dev);
8811 	struct amdgpu_display_manager *dm = &adev->dm;
8812 	struct dm_atomic_state *dm_state;
8813 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8814 	uint32_t i, j;
8815 	struct drm_crtc *crtc;
8816 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8817 	unsigned long flags;
8818 	bool wait_for_vblank = true;
8819 	struct drm_connector *connector;
8820 	struct drm_connector_state *old_con_state, *new_con_state;
8821 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8822 	int crtc_disable_count = 0;
8823 	bool mode_set_reset_required = false;
8824 
8825 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8826 
8827 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8828 
8829 	dm_state = dm_atomic_get_new_state(state);
8830 	if (dm_state && dm_state->context) {
8831 		dc_state = dm_state->context;
8832 	} else {
8833 		/* No state changes, retain current state. */
8834 		dc_state_temp = dc_create_state(dm->dc);
8835 		ASSERT(dc_state_temp);
8836 		dc_state = dc_state_temp;
8837 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8838 	}
8839 
8840 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8841 				       new_crtc_state, i) {
8842 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8843 
8844 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8845 
8846 		if (old_crtc_state->active &&
8847 		    (!new_crtc_state->active ||
8848 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8849 			manage_dm_interrupts(adev, acrtc, false);
8850 			dc_stream_release(dm_old_crtc_state->stream);
8851 		}
8852 	}
8853 
8854 	drm_atomic_helper_calc_timestamping_constants(state);
8855 
8856 	/* update changed items */
8857 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8858 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8859 
8860 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8861 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8862 
8863 		DRM_DEBUG_ATOMIC(
8864 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8865 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8866 			"connectors_changed:%d\n",
8867 			acrtc->crtc_id,
8868 			new_crtc_state->enable,
8869 			new_crtc_state->active,
8870 			new_crtc_state->planes_changed,
8871 			new_crtc_state->mode_changed,
8872 			new_crtc_state->active_changed,
8873 			new_crtc_state->connectors_changed);
8874 
8875 		/* Disable cursor if disabling crtc */
8876 		if (old_crtc_state->active && !new_crtc_state->active) {
8877 			struct dc_cursor_position position;
8878 
8879 			memset(&position, 0, sizeof(position));
8880 			mutex_lock(&dm->dc_lock);
8881 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8882 			mutex_unlock(&dm->dc_lock);
8883 		}
8884 
8885 		/* Copy all transient state flags into dc state */
8886 		if (dm_new_crtc_state->stream) {
8887 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8888 							    dm_new_crtc_state->stream);
8889 		}
8890 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8895 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8896 
8897 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8898 
8899 			if (!dm_new_crtc_state->stream) {
8900 				/*
8901 				 * this could happen because of issues with
8902 				 * userspace notifications delivery.
8903 				 * In this case userspace tries to set mode on
8904 				 * display which is disconnected in fact.
8905 				 * dc_sink is NULL in this case on aconnector.
8906 				 * We expect reset mode will come soon.
8907 				 *
8908 				 * This can also happen when unplug is done
8909 				 * during resume sequence ended
8910 				 *
8911 				 * In this case, we want to pretend we still
8912 				 * have a sink to keep the pipe running so that
8913 				 * hw state is consistent with the sw state
8914 				 */
8915 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8916 						__func__, acrtc->base.base.id);
8917 				continue;
8918 			}
8919 
8920 			if (dm_old_crtc_state->stream)
8921 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8922 
8923 			pm_runtime_get_noresume(dev->dev);
8924 
8925 			acrtc->enabled = true;
8926 			acrtc->hw_mode = new_crtc_state->mode;
8927 			crtc->hwmode = new_crtc_state->mode;
8928 			mode_set_reset_required = true;
8929 		} else if (modereset_required(new_crtc_state)) {
8930 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8931 			/* i.e. reset mode */
8932 			if (dm_old_crtc_state->stream)
8933 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8934 
8935 			mode_set_reset_required = true;
8936 		}
8937 	} /* for_each_crtc_in_state() */
8938 
8939 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
8941 		if (mode_set_reset_required)
8942 			amdgpu_dm_psr_disable_all(dm);
8943 
8944 		dm_enable_per_frame_crtc_master_sync(dc_state);
8945 		mutex_lock(&dm->dc_lock);
8946 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8947 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimizations when the vblank irq count is 0 (display off) */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
8951 #endif
8952 		mutex_unlock(&dm->dc_lock);
8953 	}
8954 
8955 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8956 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8957 
8958 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8959 
8960 		if (dm_new_crtc_state->stream != NULL) {
8961 			const struct dc_stream_status *status =
8962 					dc_stream_get_status(dm_new_crtc_state->stream);
8963 
8964 			if (!status)
8965 				status = dc_stream_get_status_from_state(dc_state,
8966 									 dm_new_crtc_state->stream);
8967 			if (!status)
8968 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8969 			else
8970 				acrtc->otg_inst = status->primary_otg_inst;
8971 		}
8972 	}
8973 #ifdef CONFIG_DRM_AMD_DC_HDCP
8974 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8975 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8976 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8977 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8978 
8979 		new_crtc_state = NULL;
8980 
8981 		if (acrtc)
8982 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8983 
8984 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8985 
8986 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8987 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8988 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8989 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8990 			dm_new_con_state->update_hdcp = true;
8991 			continue;
8992 		}
8993 
8994 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8995 			hdcp_update_display(
8996 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8997 				new_con_state->hdcp_content_type,
8998 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8999 	}
9000 #endif
9001 
9002 	/* Handle connector state changes */
9003 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9004 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9005 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9006 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9007 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9008 		struct dc_stream_update stream_update;
9009 		struct dc_info_packet hdr_packet;
9010 		struct dc_stream_status *status = NULL;
9011 		bool abm_changed, hdr_changed, scaling_changed;
9012 
9013 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9014 		memset(&stream_update, 0, sizeof(stream_update));
9015 
9016 		if (acrtc) {
9017 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9018 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9019 		}
9020 
9021 		/* Skip any modesets/resets */
9022 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9023 			continue;
9024 
9025 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9026 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9027 
9028 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9029 							     dm_old_con_state);
9030 
9031 		abm_changed = dm_new_crtc_state->abm_level !=
9032 			      dm_old_crtc_state->abm_level;
9033 
9034 		hdr_changed =
9035 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9036 
9037 		if (!scaling_changed && !abm_changed && !hdr_changed)
9038 			continue;
9039 
9040 		stream_update.stream = dm_new_crtc_state->stream;
9041 		if (scaling_changed) {
9042 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9043 					dm_new_con_state, dm_new_crtc_state->stream);
9044 
9045 			stream_update.src = dm_new_crtc_state->stream->src;
9046 			stream_update.dst = dm_new_crtc_state->stream->dst;
9047 		}
9048 
9049 		if (abm_changed) {
9050 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9051 
9052 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9053 		}
9054 
9055 		if (hdr_changed) {
9056 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9057 			stream_update.hdr_static_metadata = &hdr_packet;
9058 		}
9059 
9060 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9061 
9062 		if (WARN_ON(!status))
9063 			continue;
9064 
9065 		WARN_ON(!status->plane_count);
9066 
9067 		/*
9068 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9069 		 * Here we create an empty update on each plane.
9070 		 * To fix this, DC should permit updating only stream properties.
9071 		 */
9072 		for (j = 0; j < status->plane_count; j++)
9073 			dummy_updates[j].surface = status->plane_states[0];
9074 
9075 
9076 		mutex_lock(&dm->dc_lock);
9077 		dc_commit_updates_for_stream(dm->dc,
9078 						     dummy_updates,
9079 						     status->plane_count,
9080 						     dm_new_crtc_state->stream,
9081 						     &stream_update,
9082 						     dc_state);
9083 		mutex_unlock(&dm->dc_lock);
9084 	}
9085 
9086 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9087 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9088 				      new_crtc_state, i) {
9089 		if (old_crtc_state->active && !new_crtc_state->active)
9090 			crtc_disable_count++;
9091 
9092 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9093 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9094 
9095 		/* For freesync config update on crtc state and params for irq */
9096 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9097 
9098 		/* Handle vrr on->off / off->on transitions */
9099 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9100 						dm_new_crtc_state);
9101 	}
9102 
9103 	/**
9104 	 * Enable interrupts for CRTCs that are newly enabled or went through
9105 	 * a modeset. It was intentionally deferred until after the front end
9106 	 * state was modified to wait until the OTG was on and so the IRQ
9107 	 * handlers didn't access stale or invalid state.
9108 	 */
9109 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9110 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9111 #ifdef CONFIG_DEBUG_FS
9112 		bool configure_crc = false;
9113 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9114 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9115 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9116 #endif
9117 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9118 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9119 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9120 #endif
9121 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9122 
9123 		if (new_crtc_state->active &&
9124 		    (!old_crtc_state->active ||
9125 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9126 			dc_stream_retain(dm_new_crtc_state->stream);
9127 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9128 			manage_dm_interrupts(adev, acrtc, true);
9129 
9130 #ifdef CONFIG_DEBUG_FS
9131 			/**
9132 			 * Frontend may have changed so reapply the CRC capture
9133 			 * settings for the stream.
9134 			 */
9135 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9136 
9137 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9138 				configure_crc = true;
9139 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9140 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9141 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9142 					acrtc->dm_irq_params.crc_window.update_win = true;
9143 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9144 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9145 					crc_rd_wrk->crtc = crtc;
9146 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9147 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9148 				}
9149 #endif
9150 			}
9151 
			if (configure_crc)
				if (amdgpu_dm_crtc_configure_crc_source(
					crtc, dm_new_crtc_state, cur_crc_src))
					DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9156 #endif
9157 		}
9158 	}
9159 
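	/* Skip the vblank wait below if any CRTC requested an async flip. */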
9160 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9161 		if (new_crtc_state->async_flip)
9162 			wait_for_vblank = false;
9163 
9164 	/* update planes when needed per crtc*/
9165 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9166 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9167 
9168 		if (dm_new_crtc_state->stream)
9169 			amdgpu_dm_commit_planes(state, dc_state, dev,
9170 						dm, crtc, wait_for_vblank);
9171 	}
9172 
9173 	/* Update audio instances for each connector. */
9174 	amdgpu_dm_commit_audio(dev, state);
9175 
9176 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9177 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9178 	/* restore the backlight level */
9179 	if (dm->backlight_dev)
9180 		amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
9181 #endif
9182 	/*
9183 	 * send vblank event on all events not handled in flip and
9184 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9185 	 */
9186 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9187 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9188 
9189 		if (new_crtc_state->event)
9190 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9191 
9192 		new_crtc_state->event = NULL;
9193 	}
9194 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9195 
9196 	/* Signal HW programming completion */
9197 	drm_atomic_helper_commit_hw_done(state);
9198 
9199 	if (wait_for_vblank)
9200 		drm_atomic_helper_wait_for_flip_done(dev, state);
9201 
9202 	drm_atomic_helper_cleanup_planes(dev, state);
9203 
9204 	/* return the stolen vga memory back to VRAM */
9205 	if (!adev->mman.keep_stolen_vga_memory)
9206 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9207 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9208 
9209 	/*
9210 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9211 	 * so we can put the GPU into runtime suspend if we're not driving any
9212 	 * displays anymore
9213 	 */
9214 	for (i = 0; i < crtc_disable_count; i++)
9215 		pm_runtime_put_autosuspend(dev->dev);
9216 	pm_runtime_mark_last_busy(dev->dev);
9217 
9218 	if (dc_state_temp)
9219 		dc_release_state(dc_state_temp);
9220 }
9221 
9222 
9223 static int dm_force_atomic_commit(struct drm_connector *connector)
9224 {
9225 	int ret = 0;
9226 	struct drm_device *ddev = connector->dev;
9227 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9228 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9229 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9230 	struct drm_connector_state *conn_state;
9231 	struct drm_crtc_state *crtc_state;
9232 	struct drm_plane_state *plane_state;
9233 
9234 	if (!state)
9235 		return -ENOMEM;
9236 
9237 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9238 
9239 	/* Construct an atomic state to restore previous display setting */
9240 
9241 	/*
9242 	 * Attach connectors to drm_atomic_state
9243 	 */
9244 	conn_state = drm_atomic_get_connector_state(state, connector);
9245 
9246 	ret = PTR_ERR_OR_ZERO(conn_state);
9247 	if (ret)
9248 		goto out;
9249 
9250 	/* Attach crtc to drm_atomic_state*/
9251 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9252 
9253 	ret = PTR_ERR_OR_ZERO(crtc_state);
9254 	if (ret)
9255 		goto out;
9256 
9257 	/* force a restore */
9258 	crtc_state->mode_changed = true;
9259 
9260 	/* Attach plane to drm_atomic_state */
9261 	plane_state = drm_atomic_get_plane_state(state, plane);
9262 
9263 	ret = PTR_ERR_OR_ZERO(plane_state);
9264 	if (ret)
9265 		goto out;
9266 
9267 	/* Call commit internally with the state we just constructed */
9268 	ret = drm_atomic_commit(state);
9269 
9270 out:
9271 	drm_atomic_state_put(state);
9272 	if (ret)
9273 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9274 
9275 	return ret;
9276 }
9277 
9278 /*
9279  * This function handles all cases when set mode does not come upon hotplug.
9280  * This includes when a display is unplugged then plugged back into the
9281  * same port and when running without usermode desktop manager supprot
9282  */
9283 void dm_restore_drm_connector_state(struct drm_device *dev,
9284 				    struct drm_connector *connector)
9285 {
9286 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9287 	struct amdgpu_crtc *disconnected_acrtc;
9288 	struct dm_crtc_state *acrtc_state;
9289 
9290 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9291 		return;
9292 
9293 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9294 	if (!disconnected_acrtc)
9295 		return;
9296 
9297 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9298 	if (!acrtc_state->stream)
9299 		return;
9300 
9301 	/*
9302 	 * If the previous sink is not released and different from the current,
9303 	 * we deduce we are in a state where we can not rely on usermode call
9304 	 * to turn on the display, so we do it here
9305 	 */
9306 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9307 		dm_force_atomic_commit(&aconnector->base);
9308 }
9309 
9310 /*
9311  * Grabs all modesetting locks to serialize against any blocking commits,
9312  * Waits for completion of all non blocking commits.
9313  */
9314 static int do_aquire_global_lock(struct drm_device *dev,
9315 				 struct drm_atomic_state *state)
9316 {
9317 	struct drm_crtc *crtc;
9318 	struct drm_crtc_commit *commit;
9319 	long ret;
9320 
9321 	/*
9322 	 * Adding all modeset locks to aquire_ctx will
9323 	 * ensure that when the framework release it the
9324 	 * extra locks we are locking here will get released to
9325 	 */
9326 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9327 	if (ret)
9328 		return ret;
9329 
9330 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9331 		spin_lock(&crtc->commit_lock);
9332 		commit = list_first_entry_or_null(&crtc->commit_list,
9333 				struct drm_crtc_commit, commit_entry);
9334 		if (commit)
9335 			drm_crtc_commit_get(commit);
9336 		spin_unlock(&crtc->commit_lock);
9337 
9338 		if (!commit)
9339 			continue;
9340 
9341 		/*
9342 		 * Make sure all pending HW programming completed and
9343 		 * page flips done
9344 		 */
9345 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9346 
9347 		if (ret > 0)
9348 			ret = wait_for_completion_interruptible_timeout(
9349 					&commit->flip_done, 10*HZ);
9350 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
9354 
9355 		drm_crtc_commit_put(commit);
9356 	}
9357 
9358 	return ret < 0 ? ret : 0;
9359 }
9360 
9361 static void get_freesync_config_for_crtc(
9362 	struct dm_crtc_state *new_crtc_state,
9363 	struct dm_connector_state *new_con_state)
9364 {
9365 	struct mod_freesync_config config = {0};
9366 	struct amdgpu_dm_connector *aconnector =
9367 			to_amdgpu_dm_connector(new_con_state->base.connector);
9368 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9369 	int vrefresh = drm_mode_vrefresh(mode);
9370 	bool fs_vid_mode = false;
9371 
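	/*
	 * VRR is only usable when the connector reports freesync support and
	 * the nominal refresh rate of the requested mode falls inside the
	 * panel's [min_vfreq, max_vfreq] range.
	 */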
9372 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9373 					vrefresh >= aconnector->min_vfreq &&
9374 					vrefresh <= aconnector->max_vfreq;
9375 
9376 	if (new_crtc_state->vrr_supported) {
9377 		new_crtc_state->stream->ignore_msa_timing_param = true;
9378 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9379 
9380 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9381 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9382 		config.vsif_supported = true;
9383 		config.btr = true;
9384 
9385 		if (fs_vid_mode) {
9386 			config.state = VRR_STATE_ACTIVE_FIXED;
9387 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9388 			goto out;
9389 		} else if (new_crtc_state->base.vrr_enabled) {
9390 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9391 		} else {
9392 			config.state = VRR_STATE_INACTIVE;
9393 		}
9394 	}
9395 out:
9396 	new_crtc_state->freesync_config = config;
9397 }
9398 
9399 static void reset_freesync_config_for_crtc(
9400 	struct dm_crtc_state *new_crtc_state)
9401 {
9402 	new_crtc_state->vrr_supported = false;
9403 
9404 	memset(&new_crtc_state->vrr_infopacket, 0,
9405 	       sizeof(new_crtc_state->vrr_infopacket));
9406 }
9407 
9408 static bool
9409 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9410 				 struct drm_crtc_state *new_crtc_state)
9411 {
9412 	struct drm_display_mode old_mode, new_mode;
9413 
9414 	if (!old_crtc_state || !new_crtc_state)
9415 		return false;
9416 
9417 	old_mode = old_crtc_state->mode;
9418 	new_mode = new_crtc_state->mode;
9419 
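	/*
	 * A freesync video mode switch changes only the vertical front
	 * porch: vtotal, vsync_start and vsync_end all move together while
	 * the vsync pulse width stays the same, which is why those three
	 * fields are compared with != below.
	 */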
9420 	if (old_mode.clock       == new_mode.clock &&
9421 	    old_mode.hdisplay    == new_mode.hdisplay &&
9422 	    old_mode.vdisplay    == new_mode.vdisplay &&
9423 	    old_mode.htotal      == new_mode.htotal &&
9424 	    old_mode.vtotal      != new_mode.vtotal &&
9425 	    old_mode.hsync_start == new_mode.hsync_start &&
9426 	    old_mode.vsync_start != new_mode.vsync_start &&
9427 	    old_mode.hsync_end   == new_mode.hsync_end &&
9428 	    old_mode.vsync_end   != new_mode.vsync_end &&
9429 	    old_mode.hskew       == new_mode.hskew &&
9430 	    old_mode.vscan       == new_mode.vscan &&
9431 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9432 	    (new_mode.vsync_end - new_mode.vsync_start))
9433 		return true;
9434 
9435 	return false;
9436 }
9437 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9439 	uint64_t num, den, res;
9440 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9441 
9442 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9443 
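	/*
	 * mode.clock is in kHz: multiplying by 1000 converts it to Hz, and
	 * the extra factor of 1000000 scales the result to uHz before
	 * dividing by the number of pixels per frame (htotal * vtotal).
	 */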
9444 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9445 	den = (unsigned long long)new_crtc_state->mode.htotal *
9446 	      (unsigned long long)new_crtc_state->mode.vtotal;
9447 
9448 	res = div_u64(num, den);
9449 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9450 }
9451 
9452 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9453 				struct drm_atomic_state *state,
9454 				struct drm_crtc *crtc,
9455 				struct drm_crtc_state *old_crtc_state,
9456 				struct drm_crtc_state *new_crtc_state,
9457 				bool enable,
9458 				bool *lock_and_validation_needed)
9459 {
9460 	struct dm_atomic_state *dm_state = NULL;
9461 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9462 	struct dc_stream_state *new_stream;
9463 	int ret = 0;
9464 
9465 	/*
9466 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9467 	 * update changed items
9468 	 */
9469 	struct amdgpu_crtc *acrtc = NULL;
9470 	struct amdgpu_dm_connector *aconnector = NULL;
9471 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9472 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9473 
9474 	new_stream = NULL;
9475 
9476 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9477 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9478 	acrtc = to_amdgpu_crtc(crtc);
9479 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9480 
9481 	/* TODO This hack should go away */
9482 	if (aconnector && enable) {
9483 		/* Make sure fake sink is created in plug-in scenario */
9484 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9485 							    &aconnector->base);
9486 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9487 							    &aconnector->base);
9488 
9489 		if (IS_ERR(drm_new_conn_state)) {
9490 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9491 			goto fail;
9492 		}
9493 
9494 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9495 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9496 
9497 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9498 			goto skip_modeset;
9499 
9500 		new_stream = create_validate_stream_for_sink(aconnector,
9501 							     &new_crtc_state->mode,
9502 							     dm_new_conn_state,
9503 							     dm_old_crtc_state->stream);
9504 
9505 		/*
9506 		 * we can have no stream on ACTION_SET if a display
9507 		 * was disconnected during S3, in this case it is not an
9508 		 * error, the OS will be updated after detection, and
9509 		 * will do the right thing on next atomic commit
9510 		 */
9511 
9512 		if (!new_stream) {
9513 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9514 					__func__, acrtc->base.base.id);
9515 			ret = -ENOMEM;
9516 			goto fail;
9517 		}
9518 
9519 		/*
9520 		 * TODO: Check VSDB bits to decide whether this should
9521 		 * be enabled or not.
9522 		 */
9523 		new_stream->triggered_crtc_reset.enabled =
9524 			dm->force_timing_sync;
9525 
9526 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9527 
9528 		ret = fill_hdr_info_packet(drm_new_conn_state,
9529 					   &new_stream->hdr_static_metadata);
9530 		if (ret)
9531 			goto fail;
9532 
9533 		/*
9534 		 * If we already removed the old stream from the context
9535 		 * (and set the new stream to NULL) then we can't reuse
9536 		 * the old stream even if the stream and scaling are unchanged.
9537 		 * We'll hit the BUG_ON and black screen.
9538 		 *
9539 		 * TODO: Refactor this function to allow this check to work
9540 		 * in all conditions.
9541 		 */
9542 		if (amdgpu_freesync_vid_mode &&
9543 		    dm_new_crtc_state->stream &&
9544 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9545 			goto skip_modeset;
9546 
9547 		if (dm_new_crtc_state->stream &&
9548 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9549 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9550 			new_crtc_state->mode_changed = false;
9551 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9552 					 new_crtc_state->mode_changed);
9553 		}
9554 	}
9555 
9556 	/* mode_changed flag may get updated above, need to check again */
9557 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9558 		goto skip_modeset;
9559 
9560 	DRM_DEBUG_ATOMIC(
9561 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9562 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9563 		"connectors_changed:%d\n",
9564 		acrtc->crtc_id,
9565 		new_crtc_state->enable,
9566 		new_crtc_state->active,
9567 		new_crtc_state->planes_changed,
9568 		new_crtc_state->mode_changed,
9569 		new_crtc_state->active_changed,
9570 		new_crtc_state->connectors_changed);
9571 
9572 	/* Remove stream for any changed/disabled CRTC */
9573 	if (!enable) {
9574 
9575 		if (!dm_old_crtc_state->stream)
9576 			goto skip_modeset;
9577 
9578 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9579 		    is_timing_unchanged_for_freesync(new_crtc_state,
9580 						     old_crtc_state)) {
9581 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
9586 
9587 			set_freesync_fixed_config(dm_new_crtc_state);
9588 
9589 			goto skip_modeset;
9590 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9591 			   is_freesync_video_mode(&new_crtc_state->mode,
9592 						  aconnector)) {
9593 			set_freesync_fixed_config(dm_new_crtc_state);
9594 		}
9595 
9596 		ret = dm_atomic_get_state(state, &dm_state);
9597 		if (ret)
9598 			goto fail;
9599 
9600 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9601 				crtc->base.id);
9602 
9603 		/* i.e. reset mode */
9604 		if (dc_remove_stream_from_ctx(
9605 				dm->dc,
9606 				dm_state->context,
9607 				dm_old_crtc_state->stream) != DC_OK) {
9608 			ret = -EINVAL;
9609 			goto fail;
9610 		}
9611 
9612 		dc_stream_release(dm_old_crtc_state->stream);
9613 		dm_new_crtc_state->stream = NULL;
9614 
9615 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9616 
9617 		*lock_and_validation_needed = true;
9618 
9619 	} else {/* Add stream for any updated/enabled CRTC */
9620 		/*
9621 		 * Quick fix to prevent NULL pointer on new_stream when
9622 		 * added MST connectors not found in existing crtc_state in the chained mode
9623 		 * TODO: need to dig out the root cause of that
9624 		 */
9625 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9626 			goto skip_modeset;
9627 
9628 		if (modereset_required(new_crtc_state))
9629 			goto skip_modeset;
9630 
9631 		if (modeset_required(new_crtc_state, new_stream,
9632 				     dm_old_crtc_state->stream)) {
9633 
9634 			WARN_ON(dm_new_crtc_state->stream);
9635 
9636 			ret = dm_atomic_get_state(state, &dm_state);
9637 			if (ret)
9638 				goto fail;
9639 
9640 			dm_new_crtc_state->stream = new_stream;
9641 
9642 			dc_stream_retain(new_stream);
9643 
9644 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9645 					 crtc->base.id);
9646 
9647 			if (dc_add_stream_to_ctx(
9648 					dm->dc,
9649 					dm_state->context,
9650 					dm_new_crtc_state->stream) != DC_OK) {
9651 				ret = -EINVAL;
9652 				goto fail;
9653 			}
9654 
9655 			*lock_and_validation_needed = true;
9656 		}
9657 	}
9658 
9659 skip_modeset:
9660 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
9663 
9664 	/*
9665 	 * We want to do dc stream updates that do not require a
9666 	 * full modeset below.
9667 	 */
9668 	if (!(enable && aconnector && new_crtc_state->active))
9669 		return 0;
9670 	/*
9671 	 * Given above conditions, the dc state cannot be NULL because:
9672 	 * 1. We're in the process of enabling CRTCs (just been added
9673 	 *    to the dc context, or already is on the context)
9674 	 * 2. Has a valid connector attached, and
9675 	 * 3. Is currently active and enabled.
9676 	 * => The dc stream state currently exists.
9677 	 */
9678 	BUG_ON(dm_new_crtc_state->stream == NULL);
9679 
9680 	/* Scaling or underscan settings */
9681 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9682 		update_stream_scaling_settings(
9683 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9684 
9685 	/* ABM settings */
9686 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9687 
9688 	/*
9689 	 * Color management settings. We also update color properties
9690 	 * when a modeset is needed, to ensure it gets reprogrammed.
9691 	 */
9692 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9693 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9694 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9695 		if (ret)
9696 			goto fail;
9697 	}
9698 
9699 	/* Update Freesync settings. */
9700 	get_freesync_config_for_crtc(dm_new_crtc_state,
9701 				     dm_new_conn_state);
9702 
9703 	return ret;
9704 
9705 fail:
9706 	if (new_stream)
9707 		dc_stream_release(new_stream);
9708 	return ret;
9709 }
9710 
9711 static bool should_reset_plane(struct drm_atomic_state *state,
9712 			       struct drm_plane *plane,
9713 			       struct drm_plane_state *old_plane_state,
9714 			       struct drm_plane_state *new_plane_state)
9715 {
9716 	struct drm_plane *other;
9717 	struct drm_plane_state *old_other_state, *new_other_state;
9718 	struct drm_crtc_state *new_crtc_state;
9719 	int i;
9720 
9721 	/*
9722 	 * TODO: Remove this hack once the checks below are sufficient
9723 	 * enough to determine when we need to reset all the planes on
9724 	 * the stream.
9725 	 */
9726 	if (state->allow_modeset)
9727 		return true;
9728 
9729 	/* Exit early if we know that we're adding or removing the plane. */
9730 	if (old_plane_state->crtc != new_plane_state->crtc)
9731 		return true;
9732 
9733 	/* old crtc == new_crtc == NULL, plane not in context. */
9734 	if (!new_plane_state->crtc)
9735 		return false;
9736 
9737 	new_crtc_state =
9738 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9739 
9740 	if (!new_crtc_state)
9741 		return true;
9742 
9743 	/* CRTC Degamma changes currently require us to recreate planes. */
9744 	if (new_crtc_state->color_mgmt_changed)
9745 		return true;
9746 
9747 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9748 		return true;
9749 
9750 	/*
9751 	 * If there are any new primary or overlay planes being added or
9752 	 * removed then the z-order can potentially change. To ensure
9753 	 * correct z-order and pipe acquisition the current DC architecture
9754 	 * requires us to remove and recreate all existing planes.
9755 	 *
9756 	 * TODO: Come up with a more elegant solution for this.
9757 	 */
9758 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
9761 			continue;
9762 
9763 		if (old_other_state->crtc != new_plane_state->crtc &&
9764 		    new_other_state->crtc != new_plane_state->crtc)
9765 			continue;
9766 
9767 		if (old_other_state->crtc != new_other_state->crtc)
9768 			return true;
9769 
9770 		/* Src/dst size and scaling updates. */
9771 		if (old_other_state->src_w != new_other_state->src_w ||
9772 		    old_other_state->src_h != new_other_state->src_h ||
9773 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9774 		    old_other_state->crtc_h != new_other_state->crtc_h)
9775 			return true;
9776 
9777 		/* Rotation / mirroring updates. */
9778 		if (old_other_state->rotation != new_other_state->rotation)
9779 			return true;
9780 
9781 		/* Blending updates. */
9782 		if (old_other_state->pixel_blend_mode !=
9783 		    new_other_state->pixel_blend_mode)
9784 			return true;
9785 
9786 		/* Alpha updates. */
9787 		if (old_other_state->alpha != new_other_state->alpha)
9788 			return true;
9789 
9790 		/* Colorspace changes. */
9791 		if (old_other_state->color_range != new_other_state->color_range ||
9792 		    old_other_state->color_encoding != new_other_state->color_encoding)
9793 			return true;
9794 
9795 		/* Framebuffer checks fall at the end. */
9796 		if (!old_other_state->fb || !new_other_state->fb)
9797 			continue;
9798 
9799 		/* Pixel format changes can require bandwidth updates. */
9800 		if (old_other_state->fb->format != new_other_state->fb->format)
9801 			return true;
9802 
9803 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9804 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9805 
9806 		/* Tiling and DCC changes also require bandwidth updates. */
9807 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9808 		    old_afb->base.modifier != new_afb->base.modifier)
9809 			return true;
9810 	}
9811 
9812 	return false;
9813 }
9814 
9815 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9816 			      struct drm_plane_state *new_plane_state,
9817 			      struct drm_framebuffer *fb)
9818 {
9819 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9820 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9821 	unsigned int pitch;
9822 	bool linear;
9823 
9824 	if (fb->width > new_acrtc->max_cursor_width ||
9825 	    fb->height > new_acrtc->max_cursor_height) {
9826 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9827 				 new_plane_state->fb->width,
9828 				 new_plane_state->fb->height);
9829 		return -EINVAL;
9830 	}
9831 	if (new_plane_state->src_w != fb->width << 16 ||
9832 	    new_plane_state->src_h != fb->height << 16) {
9833 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9834 		return -EINVAL;
9835 	}
9836 
9837 	/* Pitch in pixels */
9838 	pitch = fb->pitches[0] / fb->format->cpp[0];
9839 
9840 	if (fb->width != pitch) {
9841 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9842 				 fb->width, pitch);
9843 		return -EINVAL;
9844 	}
9845 
9846 	switch (pitch) {
9847 	case 64:
9848 	case 128:
9849 	case 256:
9850 		/* FB pitch is supported by cursor plane */
9851 		break;
9852 	default:
9853 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9854 		return -EINVAL;
9855 	}
9856 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
9859 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9860 		if (adev->family < AMDGPU_FAMILY_AI) {
9861 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9862 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9863 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9864 		} else {
9865 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9866 		}
9867 		if (!linear) {
9868 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9869 			return -EINVAL;
9870 		}
9871 	}
9872 
9873 	return 0;
9874 }
9875 
9876 static int dm_update_plane_state(struct dc *dc,
9877 				 struct drm_atomic_state *state,
9878 				 struct drm_plane *plane,
9879 				 struct drm_plane_state *old_plane_state,
9880 				 struct drm_plane_state *new_plane_state,
9881 				 bool enable,
9882 				 bool *lock_and_validation_needed)
9883 {
9884 
9885 	struct dm_atomic_state *dm_state = NULL;
9886 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9887 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9888 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9889 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9890 	struct amdgpu_crtc *new_acrtc;
9891 	bool needs_reset;
9892 	int ret = 0;
9893 
9894 
9895 	new_plane_crtc = new_plane_state->crtc;
9896 	old_plane_crtc = old_plane_state->crtc;
9897 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9898 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9899 
9900 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9901 		if (!enable || !new_plane_crtc ||
9902 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9903 			return 0;
9904 
9905 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9906 
9907 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9908 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9909 			return -EINVAL;
9910 		}
9911 
9912 		if (new_plane_state->fb) {
9913 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9914 						 new_plane_state->fb);
9915 			if (ret)
9916 				return ret;
9917 		}
9918 
9919 		return 0;
9920 	}
9921 
9922 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9923 					 new_plane_state);
9924 
9925 	/* Remove any changed/removed planes */
9926 	if (!enable) {
9927 		if (!needs_reset)
9928 			return 0;
9929 
9930 		if (!old_plane_crtc)
9931 			return 0;
9932 
9933 		old_crtc_state = drm_atomic_get_old_crtc_state(
9934 				state, old_plane_crtc);
9935 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9936 
9937 		if (!dm_old_crtc_state->stream)
9938 			return 0;
9939 
9940 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9941 				plane->base.id, old_plane_crtc->base.id);
9942 
9943 		ret = dm_atomic_get_state(state, &dm_state);
9944 		if (ret)
9945 			return ret;
9946 
9947 		if (!dc_remove_plane_from_context(
9948 				dc,
9949 				dm_old_crtc_state->stream,
9950 				dm_old_plane_state->dc_state,
9951 				dm_state->context)) {
9952 
9953 			return -EINVAL;
9954 		}
9955 
9956 
9957 		dc_plane_state_release(dm_old_plane_state->dc_state);
9958 		dm_new_plane_state->dc_state = NULL;
9959 
9960 		*lock_and_validation_needed = true;
9961 
9962 	} else { /* Add new planes */
9963 		struct dc_plane_state *dc_new_plane_state;
9964 
9965 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9966 			return 0;
9967 
9968 		if (!new_plane_crtc)
9969 			return 0;
9970 
9971 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9972 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9973 
9974 		if (!dm_new_crtc_state->stream)
9975 			return 0;
9976 
9977 		if (!needs_reset)
9978 			return 0;
9979 
9980 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9981 		if (ret)
9982 			return ret;
9983 
9984 		WARN_ON(dm_new_plane_state->dc_state);
9985 
9986 		dc_new_plane_state = dc_create_plane_state(dc);
9987 		if (!dc_new_plane_state)
9988 			return -ENOMEM;
9989 
9990 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9991 				 plane->base.id, new_plane_crtc->base.id);
9992 
9993 		ret = fill_dc_plane_attributes(
9994 			drm_to_adev(new_plane_crtc->dev),
9995 			dc_new_plane_state,
9996 			new_plane_state,
9997 			new_crtc_state);
9998 		if (ret) {
9999 			dc_plane_state_release(dc_new_plane_state);
10000 			return ret;
10001 		}
10002 
10003 		ret = dm_atomic_get_state(state, &dm_state);
10004 		if (ret) {
10005 			dc_plane_state_release(dc_new_plane_state);
10006 			return ret;
10007 		}
10008 
10009 		/*
10010 		 * Any atomic check errors that occur after this will
10011 		 * not need a release. The plane state will be attached
10012 		 * to the stream, and therefore part of the atomic
10013 		 * state. It'll be released when the atomic state is
10014 		 * cleaned.
10015 		 */
10016 		if (!dc_add_plane_to_context(
10017 				dc,
10018 				dm_new_crtc_state->stream,
10019 				dc_new_plane_state,
10020 				dm_state->context)) {
10021 
10022 			dc_plane_state_release(dc_new_plane_state);
10023 			return -EINVAL;
10024 		}
10025 
10026 		dm_new_plane_state->dc_state = dc_new_plane_state;
10027 
10028 		/* Tell DC to do a full surface update every time there
10029 		 * is a plane change. Inefficient, but works for now.
10030 		 */
10031 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10032 
10033 		*lock_and_validation_needed = true;
10034 	}
10035 
10036 
10037 	return ret;
10038 }
10039 
10040 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10041 				struct drm_crtc *crtc,
10042 				struct drm_crtc_state *new_crtc_state)
10043 {
10044 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10045 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10046 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the primary plane's.
	 */
10051 
10052 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10053 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10054 	if (!new_cursor_state || !new_primary_state ||
10055 	    !new_cursor_state->fb || !new_primary_state->fb) {
10056 		return 0;
10057 	}
10058 
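	/*
	 * src_{w,h} are 16.16 fixed point, so >> 16 recovers the integer
	 * source size. The scales below are expressed in thousandths, e.g.
	 * a 64x64 cursor sourced from a 64x64 rect has a scale of 1000 (1.0x).
	 */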
	/* Avoid a divide-by-zero on a degenerate (sub-pixel) source rect. */
	if ((new_cursor_state->src_w >> 16) == 0 || (new_cursor_state->src_h >> 16) == 0 ||
	    (new_primary_state->src_w >> 16) == 0 || (new_primary_state->src_h >> 16) == 0)
		return -EINVAL;

	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
			 (new_cursor_state->src_w >> 16);
	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
			 (new_cursor_state->src_h >> 16);

	primary_scale_w = new_primary_state->crtc_w * 1000 /
			  (new_primary_state->src_w >> 16);
	primary_scale_h = new_primary_state->crtc_h * 1000 /
			  (new_primary_state->src_h >> 16);
10068 
10069 	if (cursor_scale_w != primary_scale_w ||
10070 	    cursor_scale_h != primary_scale_h) {
10071 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10072 		return -EINVAL;
10073 	}
10074 
10075 	return 0;
10076 }
10077 
10078 #if defined(CONFIG_DRM_AMD_DC_DCN)
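/*
 * Whenever a CRTC driving an MST connector needs a modeset, pull every other
 * CRTC that may share DSC resources on the same MST topology into the atomic
 * state, so that DSC bandwidth can be recomputed across all of them.
 */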
10079 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10080 {
10081 	struct drm_connector *connector;
10082 	struct drm_connector_state *conn_state;
10083 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
10086 		if (conn_state->crtc != crtc)
10087 			continue;
10088 
10089 		aconnector = to_amdgpu_dm_connector(connector);
10090 		if (!aconnector->port || !aconnector->mst_port)
10091 			aconnector = NULL;
10092 		else
10093 			break;
10094 	}
10095 
10096 	if (!aconnector)
10097 		return 0;
10098 
10099 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10100 }
10101 #endif
10102 
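/*
 * When the hardware cursor is in use, an enabled overlay plane must fully
 * cover the primary plane. The cursor inherits its scaling from the
 * underlying pipe (see dm_check_crtc_cursor), so a partially covering
 * overlay would leave screen regions where the cursor cannot be presented
 * consistently.
 */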
10103 static int validate_overlay(struct drm_atomic_state *state)
10104 {
10105 	int i;
10106 	struct drm_plane *plane;
10107 	struct drm_plane_state *new_plane_state;
10108 	struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
10109 
	/* Find the new overlay plane state; bail early if it's being disabled */
10111 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10112 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10113 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10114 				return 0;
10115 
10116 			overlay_state = new_plane_state;
10117 			continue;
10118 		}
10119 	}
10120 
10121 	/* check if we're making changes to the overlay plane */
10122 	if (!overlay_state)
10123 		return 0;
10124 
10125 	/* check if overlay plane is enabled */
10126 	if (!overlay_state->crtc)
10127 		return 0;
10128 
10129 	/* find the primary plane for the CRTC that the overlay is enabled on */
10130 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10131 	if (IS_ERR(primary_state))
10132 		return PTR_ERR(primary_state);
10133 
10134 	/* check if primary plane is enabled */
10135 	if (!primary_state->crtc)
10136 		return 0;
10137 
10138 	/* check if cursor plane is enabled */
10139 	cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
10140 	if (IS_ERR(cursor_state))
10141 		return PTR_ERR(cursor_state);
10142 
	if (drm_atomic_plane_disabling(overlay_state->crtc->cursor->state, cursor_state))
		return 0;
10145 
10146 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10147 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10148 	    primary_state->crtc_y < overlay_state->crtc_y ||
10149 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10150 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10151 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10152 		return -EINVAL;
10153 	}
10154 
10155 	return 0;
10156 }
10157 
10158 /**
 * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
10160  * @dev: The DRM device
10161  * @state: The atomic state to commit
10162  *
10163  * Validate that the given atomic state is programmable by DC into hardware.
10164  * This involves constructing a &struct dc_state reflecting the new hardware
10165  * state we wish to commit, then querying DC to see if it is programmable. It's
10166  * important not to modify the existing DC state. Otherwise, atomic_check
10167  * may unexpectedly commit hardware changes.
10168  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes, adds, or updates streams
 * on one CRTC while flipping on another, acquiring the global lock guarantees
 * that any such commit will wait for completion of any outstanding flips
 * using DRM's synchronization events.
10174  *
10175  * Note that DM adds the affected connectors for all CRTCs in state, when that
10176  * might not seem necessary. This is because DC stream creation requires the
10177  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10178  * be possible but non-trivial - a possible TODO item.
10179  *
 * Return: 0 on success, or a negative error code if validation failed.
10181  */
10182 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10183 				  struct drm_atomic_state *state)
10184 {
10185 	struct amdgpu_device *adev = drm_to_adev(dev);
10186 	struct dm_atomic_state *dm_state = NULL;
10187 	struct dc *dc = adev->dm.dc;
10188 	struct drm_connector *connector;
10189 	struct drm_connector_state *old_con_state, *new_con_state;
10190 	struct drm_crtc *crtc;
10191 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10192 	struct drm_plane *plane;
10193 	struct drm_plane_state *old_plane_state, *new_plane_state;
10194 	enum dc_status status;
10195 	int ret, i;
10196 	bool lock_and_validation_needed = false;
10197 	struct dm_crtc_state *dm_old_crtc_state;
10198 
10199 	trace_amdgpu_dm_atomic_check_begin(state);
10200 
10201 	ret = drm_atomic_helper_check_modeset(dev, state);
10202 	if (ret)
10203 		goto fail;
10204 
10205 	/* Check connector changes */
10206 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10207 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10208 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10209 
10210 		/* Skip connectors that are disabled or part of modeset already. */
10211 		if (!old_con_state->crtc && !new_con_state->crtc)
10212 			continue;
10213 
10214 		if (!new_con_state->crtc)
10215 			continue;
10216 
10217 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10218 		if (IS_ERR(new_crtc_state)) {
10219 			ret = PTR_ERR(new_crtc_state);
10220 			goto fail;
10221 		}
10222 
10223 		if (dm_old_con_state->abm_level !=
10224 		    dm_new_con_state->abm_level)
10225 			new_crtc_state->connectors_changed = true;
10226 	}
10227 
10228 #if defined(CONFIG_DRM_AMD_DC_DCN)
10229 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10230 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10231 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10232 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10233 				if (ret)
10234 					goto fail;
10235 			}
10236 		}
10237 	}
10238 #endif
10239 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10240 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10241 
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10246 			continue;
10247 
10248 		if (!new_crtc_state->enable)
10249 			continue;
10250 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
10254 
10255 		ret = drm_atomic_add_affected_planes(state, crtc);
10256 		if (ret)
10257 			goto fail;
10258 
10259 		if (dm_old_crtc_state->dsc_force_changed)
10260 			new_crtc_state->mode_changed = true;
10261 	}
10262 
10263 	/*
10264 	 * Add all primary and overlay planes on the CRTC to the state
10265 	 * whenever a plane is enabled to maintain correct z-ordering
10266 	 * and to enable fast surface updates.
10267 	 */
10268 	drm_for_each_crtc(crtc, dev) {
10269 		bool modified = false;
10270 
10271 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10272 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10273 				continue;
10274 
10275 			if (new_plane_state->crtc == crtc ||
10276 			    old_plane_state->crtc == crtc) {
10277 				modified = true;
10278 				break;
10279 			}
10280 		}
10281 
10282 		if (!modified)
10283 			continue;
10284 
10285 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10286 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10287 				continue;
10288 
10289 			new_plane_state =
10290 				drm_atomic_get_plane_state(state, plane);
10291 
10292 			if (IS_ERR(new_plane_state)) {
10293 				ret = PTR_ERR(new_plane_state);
10294 				goto fail;
10295 			}
10296 		}
10297 	}
10298 
	/* Remove existing planes if they are modified */
10300 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10301 		ret = dm_update_plane_state(dc, state, plane,
10302 					    old_plane_state,
10303 					    new_plane_state,
10304 					    false,
10305 					    &lock_and_validation_needed);
10306 		if (ret)
10307 			goto fail;
10308 	}
10309 
10310 	/* Disable all crtcs which require disable */
10311 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10312 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10313 					   old_crtc_state,
10314 					   new_crtc_state,
10315 					   false,
10316 					   &lock_and_validation_needed);
10317 		if (ret)
10318 			goto fail;
10319 	}
10320 
10321 	/* Enable all crtcs which require enable */
10322 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10323 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10324 					   old_crtc_state,
10325 					   new_crtc_state,
10326 					   true,
10327 					   &lock_and_validation_needed);
10328 		if (ret)
10329 			goto fail;
10330 	}
10331 
10332 	ret = validate_overlay(state);
10333 	if (ret)
10334 		goto fail;
10335 
10336 	/* Add new/modified planes */
10337 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10338 		ret = dm_update_plane_state(dc, state, plane,
10339 					    old_plane_state,
10340 					    new_plane_state,
10341 					    true,
10342 					    &lock_and_validation_needed);
10343 		if (ret)
10344 			goto fail;
10345 	}
10346 
10347 	/* Run this here since we want to validate the streams we created */
10348 	ret = drm_atomic_helper_check_planes(dev, state);
10349 	if (ret)
10350 		goto fail;
10351 
10352 	/* Check cursor planes scaling */
10353 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10354 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10355 		if (ret)
10356 			goto fail;
10357 	}
10358 
10359 	if (state->legacy_cursor_update) {
10360 		/*
10361 		 * This is a fast cursor update coming from the plane update
10362 		 * helper, check if it can be done asynchronously for better
10363 		 * performance.
10364 		 */
10365 		state->async_update =
10366 			!drm_atomic_helper_async_check(dev, state);
10367 
10368 		/*
10369 		 * Skip the remaining global validation if this is an async
10370 		 * update. Cursor updates can be done without affecting
10371 		 * state or bandwidth calcs and this avoids the performance
10372 		 * penalty of locking the private state object and
10373 		 * allocating a new dc_state.
10374 		 */
10375 		if (state->async_update)
10376 			return 0;
10377 	}
10378 
	/*
	 * Check scaling and underscan changes.
	 * TODO: Scaling-change validation was removed because a new stream
	 * can't be committed into the context w/o causing a full reset.
	 * Need to decide how to handle this.
	 */
10384 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10385 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10386 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10387 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10388 
10389 		/* Skip any modesets/resets */
10390 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10391 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10392 			continue;
10393 
		/* Skip anything that isn't a scaling or underscan change */
10395 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10396 			continue;
10397 
10398 		lock_and_validation_needed = true;
10399 	}
10400 
10401 	/**
10402 	 * Streams and planes are reset when there are changes that affect
10403 	 * bandwidth. Anything that affects bandwidth needs to go through
10404 	 * DC global validation to ensure that the configuration can be applied
10405 	 * to hardware.
10406 	 *
10407 	 * We have to currently stall out here in atomic_check for outstanding
10408 	 * commits to finish in this case because our IRQ handlers reference
10409 	 * DRM state directly - we can end up disabling interrupts too early
10410 	 * if we don't.
10411 	 *
10412 	 * TODO: Remove this stall and drop DM state private objects.
10413 	 */
10414 	if (lock_and_validation_needed) {
10415 		ret = dm_atomic_get_state(state, &dm_state);
10416 		if (ret)
10417 			goto fail;
10418 
10419 		ret = do_aquire_global_lock(dev, state);
10420 		if (ret)
10421 			goto fail;
10422 
10423 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10426 
10427 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10428 		if (ret)
10429 			goto fail;
10430 #endif
10431 
10432 		/*
10433 		 * Perform validation of MST topology in the state:
10434 		 * We need to perform MST atomic check before calling
10435 		 * dc_validate_global_state(), or there is a chance
10436 		 * to get stuck in an infinite loop and hang eventually.
10437 		 */
10438 		ret = drm_dp_mst_atomic_check(state);
10439 		if (ret)
10440 			goto fail;
10441 		status = dc_validate_global_state(dc, dm_state->context, false);
10442 		if (status != DC_OK) {
10443 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10444 				       dc_status_to_str(status), status);
10445 			ret = -EINVAL;
10446 			goto fail;
10447 		}
10448 	} else {
10449 		/*
10450 		 * The commit is a fast update. Fast updates shouldn't change
10451 		 * the DC context, affect global validation, and can have their
10452 		 * commit work done in parallel with other commits not touching
10453 		 * the same resource. If we have a new DC context as part of
10454 		 * the DM atomic state from validation we need to free it and
10455 		 * retain the existing one instead.
10456 		 *
10457 		 * Furthermore, since the DM atomic state only contains the DC
10458 		 * context and can safely be annulled, we can free the state
10459 		 * and clear the associated private object now to free
10460 		 * some memory and avoid a possible use-after-free later.
10461 		 */
10462 
10463 		for (i = 0; i < state->num_private_objs; i++) {
10464 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10465 
10466 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10468 
10469 				dm_atomic_destroy_state(obj,
10470 						state->private_objs[i].state);
10471 
10472 				/* If i is not at the end of the array then the
10473 				 * last element needs to be moved to where i was
10474 				 * before the array can safely be truncated.
10475 				 */
10476 				if (i != j)
10477 					state->private_objs[i] =
10478 						state->private_objs[j];
10479 
10480 				state->private_objs[j].ptr = NULL;
10481 				state->private_objs[j].state = NULL;
10482 				state->private_objs[j].old_state = NULL;
10483 				state->private_objs[j].new_state = NULL;
10484 
10485 				state->num_private_objs = j;
10486 				break;
10487 			}
10488 		}
10489 	}
10490 
10491 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10493 		struct dm_crtc_state *dm_new_crtc_state =
10494 			to_dm_crtc_state(new_crtc_state);
10495 
10496 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10497 							 UPDATE_TYPE_FULL :
10498 							 UPDATE_TYPE_FAST;
10499 	}
10500 
	/* Must have succeeded at this point */
10502 	WARN_ON(ret);
10503 
10504 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10505 
10506 	return ret;
10507 
10508 fail:
10509 	if (ret == -EDEADLK)
10510 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10511 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10512 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10513 	else
10514 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10515 
10516 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10517 
10518 	return ret;
10519 }
10520 
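/*
 * Query DPCD to see whether the sink can ignore the MSA timing parameters;
 * sinks that can are candidates for variable refresh (FreeSync) over DP,
 * subject to the EDID range checks in amdgpu_dm_update_freesync_caps().
 */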
10521 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10522 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10523 {
10524 	uint8_t dpcd_data;
10525 	bool capable = false;
10526 
10527 	if (amdgpu_dm_connector->dc_link &&
10528 		dm_helpers_dp_read_dpcd(
10529 				NULL,
10530 				amdgpu_dm_connector->dc_link,
10531 				DP_DOWN_STREAM_PORT_COUNT,
10532 				&dpcd_data,
10533 				sizeof(dpcd_data))) {
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10535 	}
10536 
10537 	return capable;
10538 }
10539 
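/*
 * Feed a CEA extension block to the DC EDID parser (offloaded to DMCU/DMUB
 * firmware) eight bytes at a time, waiting for an ack after each chunk, and
 * retrieve the AMD vendor-specific data block result after the final chunk.
 */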
10540 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10541 		uint8_t *edid_ext, int len,
10542 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10543 {
10544 	int i;
10545 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10546 	struct dc *dc = adev->dm.dc;
10547 
10548 	/* send extension block to DMCU for parsing */
10549 	for (i = 0; i < len; i += 8) {
10550 		bool res;
10551 		int offset;
10552 
		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* entire EDID block has been sent; expect the result */
10559 			int version, min_rate, max_rate;
10560 
10561 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10562 			if (res) {
10563 				/* amd vsdb found */
10564 				vsdb_info->freesync_supported = 1;
10565 				vsdb_info->amd_vsdb_version = version;
10566 				vsdb_info->min_refresh_rate_hz = min_rate;
10567 				vsdb_info->max_refresh_rate_hz = max_rate;
10568 				return true;
10569 			}
10570 			/* not amd vsdb */
10571 			return false;
10572 		}
10573 
		/* check for ack */
10575 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10576 		if (!res)
10577 			return false;
10578 	}
10579 
10580 	return false;
10581 }
10582 
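/*
 * Locate the first CEA extension block in the EDID and hand it to
 * parse_edid_cea(). Returns the extension index when an AMD VSDB with
 * FreeSync support was found, -ENODEV otherwise.
 */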
10583 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10584 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10585 {
10586 	uint8_t *edid_ext = NULL;
10587 	int i;
10588 	bool valid_vsdb_found = false;
10589 
10590 	/*----- drm_find_cea_extension() -----*/
10591 	/* No EDID or EDID extensions */
10592 	if (edid == NULL || edid->extensions == 0)
10593 		return -ENODEV;
10594 
10595 	/* Find CEA extension */
10596 	for (i = 0; i < edid->extensions; i++) {
10597 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10598 		if (edid_ext[0] == CEA_EXT)
10599 			break;
10600 	}
10601 
10602 	if (i == edid->extensions)
10603 		return -ENODEV;
10604 
10605 	/*----- cea_db_offsets() -----*/
10606 	if (edid_ext[0] != CEA_EXT)
10607 		return -ENODEV;
10608 
10609 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10610 
10611 	return valid_vsdb_found ? i : -ENODEV;
10612 }
10613 
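/**
 * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync state
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL if the sink is disconnected
 *
 * For DP/eDP sinks that can ignore MSA timing parameters, the refresh range
 * comes from the EDID monitor range descriptor; for HDMI sinks it comes from
 * the AMD vendor-specific data block. The result is mirrored into the
 * connector's vrr_capable property.
 */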
10614 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10615 					struct edid *edid)
10616 {
10617 	int i = 0;
10618 	struct detailed_timing *timing;
10619 	struct detailed_non_pixel *data;
10620 	struct detailed_data_monitor_range *range;
10621 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10622 			to_amdgpu_dm_connector(connector);
10623 	struct dm_connector_state *dm_con_state = NULL;
10624 
10625 	struct drm_device *dev = connector->dev;
10626 	struct amdgpu_device *adev = drm_to_adev(dev);
10627 	bool freesync_capable = false;
10628 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10629 
10630 	if (!connector->state) {
10631 		DRM_ERROR("%s - Connector has no state", __func__);
10632 		goto update;
10633 	}
10634 
10635 	if (!edid) {
10636 		dm_con_state = to_dm_connector_state(connector->state);
10637 
10638 		amdgpu_dm_connector->min_vfreq = 0;
10639 		amdgpu_dm_connector->max_vfreq = 0;
10640 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10641 
10642 		goto update;
10643 	}
10644 
10645 	dm_con_state = to_dm_connector_state(connector->state);
10646 
10647 	if (!amdgpu_dm_connector->dc_sink) {
10648 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10649 		goto update;
10650 	}
	if (!adev->dm.freesync_module)
		goto update;

	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10657 		bool edid_check_required = false;
10658 
10659 		if (edid) {
10660 			edid_check_required = is_dp_capable_without_timing_msa(
10661 						adev->dm.dc,
10662 						amdgpu_dm_connector);
10663 		}
10664 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
10669 				timing	= &edid->detailed_timings[i];
10670 				data	= &timing->data.other_data;
10671 				range	= &data->data.range;
10672 				/*
10673 				 * Check if monitor has continuous frequency mode
10674 				 */
10675 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10676 					continue;
10677 				/*
10678 				 * Check for flag range limits only. If flag == 1 then
10679 				 * no additional timing information provided.
10680 				 * Default GTF, GTF Secondary curve and CVT are not
10681 				 * supported
10682 				 */
10683 				if (range->flags != 1)
10684 					continue;
10685 
10686 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10687 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10688 				amdgpu_dm_connector->pixel_clock_mhz =
10689 					range->pixel_clock_mhz * 10;
10690 
10691 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10692 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10693 
10694 				break;
10695 			}
10696 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
10702 		}
10703 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10704 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10705 		if (i >= 0 && vsdb_info.freesync_supported) {
10706 			timing  = &edid->detailed_timings[i];
10707 			data    = &timing->data.other_data;
10708 
10709 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10710 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10711 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10712 				freesync_capable = true;
10713 
10714 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10715 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10716 		}
10717 	}
10718 
10719 update:
10720 	if (dm_con_state)
10721 		dm_con_state->freesync_capable = freesync_capable;
10722 
10723 	if (connector->vrr_capable_property)
10724 		drm_connector_set_vrr_capable_property(connector,
10725 						       freesync_capable);
10726 }
10727 
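/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register and
 * cache both the raw PSR version and whether the feature can be enabled on
 * this eDP link.
 */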
10728 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10729 {
10730 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10731 
10732 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10733 		return;
10734 	if (link->type == dc_connection_none)
10735 		return;
10736 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10737 					dpcd_data, sizeof(dpcd_data))) {
10738 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10739 
10740 		if (dpcd_data[0] == 0) {
10741 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10742 			link->psr_settings.psr_feature_enabled = false;
10743 		} else {
10744 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10745 			link->psr_settings.psr_feature_enabled = true;
10746 		}
10747 
10748 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10749 	}
10750 }
10751 
10752 /*
10753  * amdgpu_dm_link_setup_psr() - configure psr link
10754  * @stream: stream state
10755  *
 * Return: true on success
10757  */
10758 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10759 {
10760 	struct dc_link *link = NULL;
10761 	struct psr_config psr_config = {0};
10762 	struct psr_context psr_context = {0};
10763 	bool ret = false;
10764 
10765 	if (stream == NULL)
10766 		return false;
10767 
10768 	link = stream->link;
10769 
10770 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10771 
10772 	if (psr_config.psr_version > 0) {
10773 		psr_config.psr_exit_link_training_required = 0x1;
10774 		psr_config.psr_frame_capture_indication_req = 0;
10775 		psr_config.psr_rfb_setup_time = 0x37;
10776 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10777 		psr_config.allow_smu_optimizations = 0x0;
10778 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10783 
10784 	return ret;
10785 }
10786 
10787 /*
10788  * amdgpu_dm_psr_enable() - enable psr f/w
10789  * @stream: stream state
10790  *
 * Return: true on success
10792  */
10793 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10794 {
10795 	struct dc_link *link = stream->link;
10796 	unsigned int vsync_rate_hz = 0;
10797 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; start from a fail-safe default of two
	 * static frames in case the vsync rate can't be derived below.
	 */
	unsigned int num_frames_static = 2;
10803 
10804 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10805 
10806 	vsync_rate_hz = div64_u64(div64_u64((
10807 			stream->timing.pix_clk_100hz * 100),
10808 			stream->timing.v_total),
10809 			stream->timing.h_total);
10810 
10811 	/* Round up
10812 	 * Calculate number of frames such that at least 30 ms of time has
10813 	 * passed.
10814 	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
10819 
10820 	params.triggers.cursor_update = true;
10821 	params.triggers.overlay_update = true;
10822 	params.triggers.surface_update = true;
10823 	params.num_frames = num_frames_static;
10824 
10825 	dc_stream_set_static_screen_params(link->ctx->dc,
10826 					   &stream, 1,
10827 					   &params);
10828 
10829 	return dc_link_set_psr_allow_active(link, true, false, false);
10830 }
10831 
10832 /*
10833  * amdgpu_dm_psr_disable() - disable psr f/w
10834  * @stream:  stream state
10835  *
 * Return: true on success
10837  */
10838 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10839 {
10840 
10841 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10842 
10843 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10844 }
10845 
10846 /*
10847  * amdgpu_dm_psr_disable() - disable psr f/w
10848  * if psr is enabled on any stream
10849  *
10850  * Return: true if success
10851  */
10852 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10853 {
10854 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10855 	return dc_set_psr_allow_active(dm->dc, false);
10856 }
10857 
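/*
 * Apply the force_timing_sync setting to every stream in the current DC
 * state and retrigger CRTC synchronization, all under the DC lock.
 */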
10858 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10859 {
10860 	struct amdgpu_device *adev = drm_to_adev(dev);
10861 	struct dc *dc = adev->dm.dc;
10862 	int i;
10863 
10864 	mutex_lock(&adev->dm.dc_lock);
10865 	if (dc->current_state) {
10866 		for (i = 0; i < dc->current_state->stream_count; ++i)
10867 			dc->current_state->streams[i]
10868 				->triggered_crtc_reset.enabled =
10869 				adev->dm.force_timing_sync;
10870 
10871 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10872 		dc_trigger_sync(dc, dc->current_state);
10873 	}
10874 	mutex_unlock(&adev->dm.dc_lock);
10875 }
10876 
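/*
 * Register access helpers handed to DC. All accesses go through CGS and are
 * traced; builds with DM_CHECK_ADDR_0 also reject offset 0, which would
 * indicate an uninitialized register address.
 */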
10877 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10878 		       uint32_t value, const char *func_name)
10879 {
10880 #ifdef DM_CHECK_ADDR_0
10881 	if (address == 0) {
10882 		DC_ERR("invalid register write. address = 0");
10883 		return;
10884 	}
10885 #endif
10886 	cgs_write_register(ctx->cgs_device, address, value);
10887 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10888 }
10889 
10890 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10891 			  const char *func_name)
10892 {
10893 	uint32_t value;
10894 #ifdef DM_CHECK_ADDR_0
10895 	if (address == 0) {
10896 		DC_ERR("invalid register read; address = 0\n");
10897 		return 0;
10898 	}
10899 #endif
10900 
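	/*
	 * A read issued while a DMUB register gather is in progress (other
	 * than for a burst write) can't be serviced from the gathered
	 * sequence; flag it as a programming error and return 0.
	 */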
10901 	if (ctx->dmub_srv &&
10902 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10903 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10904 		ASSERT(false);
10905 		return 0;
10906 	}
10907 
10908 	value = cgs_read_register(ctx->cgs_device, address);
10909 
10910 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10911 
10912 	return value;
10913 }
10914 
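/*
 * Start a DMUB-assisted AUX transfer and wait (up to 10 seconds) for the
 * dmub_aux_transfer_done completion signalled from the DMUB notification
 * path. Returns the AUX reply length, or -1 with *operation_result set to
 * AUX_RET_ERROR_TIMEOUT when the wait times out or is interrupted.
 */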
10915 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
10916 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
10917 {
10918 	struct amdgpu_device *adev = ctx->driver_context;
10919 	int ret = 0;
10920 
10921 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret <= 0) {
		/* Treat an interrupted wait like a timeout; the notification never arrived */
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
10927 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
10928 
10929 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		*payload->reply = adev->dm.dmub_notify->aux_reply.command;

		/* For the read case, copy the reply data into the payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    *payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK)
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}
10937 	}
10938 
10939 	return adev->dm.dmub_notify->aux_reply.length;
10940 }
10941