1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #include "amdgpu_dm_plane.h"
50 #include "amdgpu_dm_crtc.h"
51 #ifdef CONFIG_DRM_AMD_DC_HDCP
52 #include "amdgpu_dm_hdcp.h"
53 #include <drm/display/drm_hdcp_helper.h>
54 #endif
55 #include "amdgpu_pm.h"
56 #include "amdgpu_atombios.h"
57 
58 #include "amd_shared.h"
59 #include "amdgpu_dm_irq.h"
60 #include "dm_helpers.h"
61 #include "amdgpu_dm_mst_types.h"
62 #if defined(CONFIG_DEBUG_FS)
63 #include "amdgpu_dm_debugfs.h"
64 #endif
65 #include "amdgpu_dm_psr.h"
66 
67 #include "ivsrcid/ivsrcid_vislands30.h"
68 
69 #include "i2caux_interface.h"
70 #include <linux/module.h>
71 #include <linux/moduleparam.h>
72 #include <linux/types.h>
73 #include <linux/pm_runtime.h>
74 #include <linux/pci.h>
75 #include <linux/firmware.h>
76 #include <linux/component.h>
77 #include <linux/dmi.h>
78 
79 #include <drm/display/drm_dp_mst_helper.h>
80 #include <drm/display/drm_hdmi_helper.h>
81 #include <drm/drm_atomic.h>
82 #include <drm/drm_atomic_uapi.h>
83 #include <drm/drm_atomic_helper.h>
84 #include <drm/drm_blend.h>
85 #include <drm/drm_fb_helper.h>
86 #include <drm/drm_fourcc.h>
87 #include <drm/drm_edid.h>
88 #include <drm/drm_vblank.h>
89 #include <drm/drm_audio_component.h>
90 #include <drm/drm_gem_atomic_helper.h>
91 
92 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
93 
94 #include "dcn/dcn_1_0_offset.h"
95 #include "dcn/dcn_1_0_sh_mask.h"
96 #include "soc15_hw_ip.h"
97 #include "soc15_common.h"
98 #include "vega10_ip_offset.h"
99 
100 #include "soc15_common.h"
101 
102 #include "gc/gc_11_0_0_offset.h"
103 #include "gc/gc_11_0_0_sh_mask.h"
104 
105 #include "modules/inc/mod_freesync.h"
106 #include "modules/power/power_helpers.h"
107 #include "modules/inc/mod_info_packet.h"
108 
109 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
111 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
113 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
115 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
117 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
119 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
121 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
123 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
125 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
127 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
129 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
130 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
131 
132 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
133 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
134 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
135 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
136 
137 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
138 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
139 
140 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
141 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
142 
143 /* Number of bytes in PSP header for firmware. */
144 #define PSP_HEADER_BYTES 0x100
145 
146 /* Number of bytes in PSP footer for firmware. */
147 #define PSP_FOOTER_BYTES 0x100
148 
149 /**
150  * DOC: overview
151  *
152  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
153  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
154  * requests into DC requests, and DC responses into DRM responses.
155  *
156  * The root control structure is &struct amdgpu_display_manager.
157  */
158 
159 /* basic init/fini API */
160 static int amdgpu_dm_init(struct amdgpu_device *adev);
161 static void amdgpu_dm_fini(struct amdgpu_device *adev);
162 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
163 
164 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
165 {
166 	switch (link->dpcd_caps.dongle_type) {
167 	case DISPLAY_DONGLE_NONE:
168 		return DRM_MODE_SUBCONNECTOR_Native;
169 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
170 		return DRM_MODE_SUBCONNECTOR_VGA;
171 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
172 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
173 		return DRM_MODE_SUBCONNECTOR_DVID;
174 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
175 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
176 		return DRM_MODE_SUBCONNECTOR_HDMIA;
177 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
178 	default:
179 		return DRM_MODE_SUBCONNECTOR_Unknown;
180 	}
181 }
182 
183 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
184 {
185 	struct dc_link *link = aconnector->dc_link;
186 	struct drm_connector *connector = &aconnector->base;
187 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
188 
189 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
190 		return;
191 
192 	if (aconnector->dc_sink)
193 		subconnector = get_subconnector_type(link);
194 
195 	drm_object_property_set_value(&connector->base,
196 			connector->dev->mode_config.dp_subconnector_property,
197 			subconnector);
198 }
199 
200 /*
201  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
203  * drm_encoder, drm_mode_config
204  *
205  * Returns 0 on success
206  */
207 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
208 /* removes and deallocates the drm structures, created by the above function */
209 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
210 
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
213 				    uint32_t link_index,
214 				    struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216 				  struct amdgpu_encoder *aencoder,
217 				  uint32_t link_index);
218 
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220 
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222 
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224 				  struct drm_atomic_state *state);
225 
226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
227 static void handle_hpd_rx_irq(void *param);
228 
229 static bool
230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
231 				 struct drm_crtc_state *new_crtc_state);
232 /*
233  * dm_vblank_get_counter
234  *
235  * @brief
236  * Get counter for number of vertical blanks
237  *
238  * @param
239  * struct amdgpu_device *adev - [in] desired amdgpu device
240  * int disp_idx - [in] which CRTC to get the counter from
241  *
242  * @return
243  * Counter for vertical blanks
244  */
245 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
246 {
247 	if (crtc >= adev->mode_info.num_crtc)
248 		return 0;
249 	else {
250 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
251 
252 		if (acrtc->dm_irq_params.stream == NULL) {
253 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
254 				  crtc);
255 			return 0;
256 		}
257 
258 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
259 	}
260 }
261 
262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
263 				  u32 *vbl, u32 *position)
264 {
265 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
266 
267 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
268 		return -EINVAL;
269 	else {
270 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
271 
272 		if (acrtc->dm_irq_params.stream ==  NULL) {
273 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
274 				  crtc);
275 			return 0;
276 		}
277 
278 		/*
279 		 * TODO rework base driver to use values directly.
280 		 * for now parse it back into reg-format
281 		 */
282 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
283 					 &v_blank_start,
284 					 &v_blank_end,
285 					 &h_position,
286 					 &v_position);
287 
288 		*position = v_position | (h_position << 16);
289 		*vbl = v_blank_start | (v_blank_end << 16);
290 	}
291 
292 	return 0;
293 }
294 
/* amd_ip_funcs hook: report whether the DM IP block is idle (stubbed). */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
300 
/* amd_ip_funcs hook: wait for the DM IP block to go idle (stubbed). */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
306 
/* amd_ip_funcs hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
311 
/* amd_ip_funcs hook: soft-reset the DM IP block (stubbed). */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
317 
318 static struct amdgpu_crtc *
319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320 		     int otg_inst)
321 {
322 	struct drm_device *dev = adev_to_drm(adev);
323 	struct drm_crtc *crtc;
324 	struct amdgpu_crtc *amdgpu_crtc;
325 
326 	if (WARN_ON(otg_inst == -1))
327 		return adev->mode_info.crtcs[0];
328 
329 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
330 		amdgpu_crtc = to_amdgpu_crtc(crtc);
331 
332 		if (amdgpu_crtc->otg_inst == otg_inst)
333 			return amdgpu_crtc;
334 	}
335 
336 	return NULL;
337 }
338 
339 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
340 					      struct dm_crtc_state *new_state)
341 {
342 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
343 		return true;
344 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
345 		return true;
346 	else
347 		return false;
348 }
349 
350 /**
351  * dm_pflip_high_irq() - Handle pageflip interrupt
352  * @interrupt_params: ignored
353  *
354  * Handles the pageflip interrupt by notifying all interested parties
355  * that the pageflip has been completed.
356  */
357 static void dm_pflip_high_irq(void *interrupt_params)
358 {
359 	struct amdgpu_crtc *amdgpu_crtc;
360 	struct common_irq_params *irq_params = interrupt_params;
361 	struct amdgpu_device *adev = irq_params->adev;
362 	unsigned long flags;
363 	struct drm_pending_vblank_event *e;
364 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
365 	bool vrr_active;
366 
367 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
368 
369 	/* IRQ could occur when in initial stage */
370 	/* TODO work and BO cleanup */
371 	if (amdgpu_crtc == NULL) {
372 		DC_LOG_PFLIP("CRTC is null, returning.\n");
373 		return;
374 	}
375 
376 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
377 
378 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
379 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
380 						 amdgpu_crtc->pflip_status,
381 						 AMDGPU_FLIP_SUBMITTED,
382 						 amdgpu_crtc->crtc_id,
383 						 amdgpu_crtc);
384 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
385 		return;
386 	}
387 
388 	/* page flip completed. */
389 	e = amdgpu_crtc->event;
390 	amdgpu_crtc->event = NULL;
391 
392 	WARN_ON(!e);
393 
394 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
395 
396 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
397 	if (!vrr_active ||
398 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
399 				      &v_blank_end, &hpos, &vpos) ||
400 	    (vpos < v_blank_start)) {
401 		/* Update to correct count and vblank timestamp if racing with
402 		 * vblank irq. This also updates to the correct vblank timestamp
403 		 * even in VRR mode, as scanout is past the front-porch atm.
404 		 */
405 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
406 
407 		/* Wake up userspace by sending the pageflip event with proper
408 		 * count and timestamp of vblank of flip completion.
409 		 */
410 		if (e) {
411 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
412 
413 			/* Event sent, so done with vblank for this flip */
414 			drm_crtc_vblank_put(&amdgpu_crtc->base);
415 		}
416 	} else if (e) {
417 		/* VRR active and inside front-porch: vblank count and
418 		 * timestamp for pageflip event will only be up to date after
419 		 * drm_crtc_handle_vblank() has been executed from late vblank
420 		 * irq handler after start of back-porch (vline 0). We queue the
421 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
422 		 * updated timestamp and count, once it runs after us.
423 		 *
424 		 * We need to open-code this instead of using the helper
425 		 * drm_crtc_arm_vblank_event(), as that helper would
426 		 * call drm_crtc_accurate_vblank_count(), which we must
427 		 * not call in VRR mode while we are in front-porch!
428 		 */
429 
430 		/* sequence will be replaced by real count during send-out. */
431 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
432 		e->pipe = amdgpu_crtc->crtc_id;
433 
434 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
435 		e = NULL;
436 	}
437 
438 	/* Keep track of vblank of this flip for flip throttling. We use the
439 	 * cooked hw counter, as that one incremented at start of this vblank
440 	 * of pageflip completion, so last_flip_vblank is the forbidden count
441 	 * for queueing new pageflips if vsync + VRR is enabled.
442 	 */
443 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
444 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
445 
446 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
447 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
448 
449 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
450 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
451 		     vrr_active, (int) !e);
452 }
453 
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: pointer to the &struct common_irq_params carrying the
 *                    amdgpu device and the IRQ source that fired
 *
 * Fires after end of front-porch. Tracks the measured refresh rate and, in
 * VRR mode, performs the core vblank handling (deferred from
 * dm_crtc_high_irq()) plus BTR processing for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		/* Delta between consecutive vblank timestamps = duration of
		 * the last frame, used for refresh-rate tracing below.
		 */
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock guards vrr_params against the other
				 * IRQ handlers that also update it.
				 */
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
512 
513 /**
514  * dm_crtc_high_irq() - Handles CRTC interrupt
515  * @interrupt_params: used for determining the CRTC instance
516  *
517  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
518  * event handler.
519  */
520 static void dm_crtc_high_irq(void *interrupt_params)
521 {
522 	struct common_irq_params *irq_params = interrupt_params;
523 	struct amdgpu_device *adev = irq_params->adev;
524 	struct amdgpu_crtc *acrtc;
525 	unsigned long flags;
526 	int vrr_active;
527 
528 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
529 	if (!acrtc)
530 		return;
531 
532 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
533 
534 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
535 		      vrr_active, acrtc->dm_irq_params.active_planes);
536 
537 	/**
538 	 * Core vblank handling at start of front-porch is only possible
539 	 * in non-vrr mode, as only there vblank timestamping will give
540 	 * valid results while done in front-porch. Otherwise defer it
541 	 * to dm_vupdate_high_irq after end of front-porch.
542 	 */
543 	if (!vrr_active)
544 		dm_crtc_handle_vblank(acrtc);
545 
546 	/**
547 	 * Following stuff must happen at start of vblank, for crc
548 	 * computation and below-the-range btr support in vrr mode.
549 	 */
550 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
551 
552 	/* BTR updates need to happen before VUPDATE on Vega and above. */
553 	if (adev->family < AMDGPU_FAMILY_AI)
554 		return;
555 
556 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
557 
558 	if (acrtc->dm_irq_params.stream &&
559 	    acrtc->dm_irq_params.vrr_params.supported &&
560 	    acrtc->dm_irq_params.freesync_config.state ==
561 		    VRR_STATE_ACTIVE_VARIABLE) {
562 		mod_freesync_handle_v_update(adev->dm.freesync_module,
563 					     acrtc->dm_irq_params.stream,
564 					     &acrtc->dm_irq_params.vrr_params);
565 
566 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
567 					   &acrtc->dm_irq_params.vrr_params.adjust);
568 	}
569 
570 	/*
571 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
572 	 * In that case, pageflip completion interrupts won't fire and pageflip
573 	 * completion events won't get delivered. Prevent this by sending
574 	 * pending pageflip events from here if a flip is still pending.
575 	 *
576 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
577 	 * avoid race conditions between flip programming and completion,
578 	 * which could cause too early flip completion events.
579 	 */
580 	if (adev->family >= AMDGPU_FAMILY_RV &&
581 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
582 	    acrtc->dm_irq_params.active_planes == 0) {
583 		if (acrtc->event) {
584 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
585 			acrtc->event = NULL;
586 			drm_crtc_vblank_put(&acrtc->base);
587 		}
588 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
589 	}
590 
591 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
592 }
593 
594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
595 /**
596  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
597  * DCN generation ASICs
598  * @interrupt_params: interrupt parameters
599  *
600  * Used to set crc window/read out crc value at vertical line 0 position
601  */
602 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
603 {
604 	struct common_irq_params *irq_params = interrupt_params;
605 	struct amdgpu_device *adev = irq_params->adev;
606 	struct amdgpu_crtc *acrtc;
607 
608 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
609 
610 	if (!acrtc)
611 		return;
612 
613 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
614 }
615 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
616 
617 /**
618  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
619  * @adev: amdgpu_device pointer
620  * @notify: dmub notification structure
621  *
622  * Dmub AUX or SET_CONFIG command completion processing callback
623  * Copies dmub notification to DM which is to be read by AUX command.
624  * issuing thread and also signals the event to wake up the thread.
625  */
626 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
627 					struct dmub_notification *notify)
628 {
629 	if (adev->dm.dmub_notify)
630 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
631 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
632 		complete(&adev->dm.dmub_aux_transfer_done);
633 }
634 
635 /**
636  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
637  * @adev: amdgpu_device pointer
638  * @notify: dmub notification structure
639  *
640  * Dmub Hpd interrupt processing callback. Gets displayindex through the
641  * ink index and calls helper to do the processing.
642  */
643 static void dmub_hpd_callback(struct amdgpu_device *adev,
644 			      struct dmub_notification *notify)
645 {
646 	struct amdgpu_dm_connector *aconnector;
647 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
648 	struct drm_connector *connector;
649 	struct drm_connector_list_iter iter;
650 	struct dc_link *link;
651 	uint8_t link_index = 0;
652 	struct drm_device *dev;
653 
654 	if (adev == NULL)
655 		return;
656 
657 	if (notify == NULL) {
658 		DRM_ERROR("DMUB HPD callback notification was NULL");
659 		return;
660 	}
661 
662 	if (notify->link_index > adev->dm.dc->link_count) {
663 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
664 		return;
665 	}
666 
667 	link_index = notify->link_index;
668 	link = adev->dm.dc->links[link_index];
669 	dev = adev->dm.ddev;
670 
671 	drm_connector_list_iter_begin(dev, &iter);
672 	drm_for_each_connector_iter(connector, &iter) {
673 		aconnector = to_amdgpu_dm_connector(connector);
674 		if (link && aconnector->dc_link == link) {
675 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
676 			hpd_aconnector = aconnector;
677 			break;
678 		}
679 	}
680 	drm_connector_list_iter_end(&iter);
681 
682 	if (hpd_aconnector) {
683 		if (notify->type == DMUB_NOTIFICATION_HPD)
684 			handle_hpd_irq_helper(hpd_aconnector);
685 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
686 			handle_hpd_rx_irq(hpd_aconnector);
687 	}
688 }
689 
690 /**
691  * register_dmub_notify_callback - Sets callback for DMUB notify
692  * @adev: amdgpu_device pointer
693  * @type: Type of dmub notification
694  * @callback: Dmub interrupt callback function
695  * @dmub_int_thread_offload: offload indicator
696  *
697  * API to register a dmub callback handler for a dmub notification
698  * Also sets indicator whether callback processing to be offloaded.
699  * to dmub interrupt handling thread
700  * Return: true if successfully registered, false if there is existing registration
701  */
702 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
703 					  enum dmub_notification_type type,
704 					  dmub_notify_interrupt_callback_t callback,
705 					  bool dmub_int_thread_offload)
706 {
707 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
708 		adev->dm.dmub_callback[type] = callback;
709 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
710 	} else
711 		return false;
712 
713 	return true;
714 }
715 
716 static void dm_handle_hpd_work(struct work_struct *work)
717 {
718 	struct dmub_hpd_work *dmub_hpd_wrk;
719 
720 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
721 
722 	if (!dmub_hpd_wrk->dmub_notify) {
723 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
724 		return;
725 	}
726 
727 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
728 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
729 		dmub_hpd_wrk->dmub_notify);
730 	}
731 
732 	kfree(dmub_hpd_wrk->dmub_notify);
733 	kfree(dmub_hpd_wrk);
734 
735 }
736 
737 #define DMUB_TRACE_MAX_READ 64
738 /**
739  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
740  * @interrupt_params: used for determining the Outbox instance
741  *
742  * Handles the Outbox Interrupt
743  * event handler.
744  */
745 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
746 {
747 	struct dmub_notification notify;
748 	struct common_irq_params *irq_params = interrupt_params;
749 	struct amdgpu_device *adev = irq_params->adev;
750 	struct amdgpu_display_manager *dm = &adev->dm;
751 	struct dmcub_trace_buf_entry entry = { 0 };
752 	uint32_t count = 0;
753 	struct dmub_hpd_work *dmub_hpd_wrk;
754 	struct dc_link *plink = NULL;
755 
756 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
757 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758 
759 		do {
760 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
761 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
762 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
763 				continue;
764 			}
765 			if (!dm->dmub_callback[notify.type]) {
766 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
767 				continue;
768 			}
769 			if (dm->dmub_thread_offload[notify.type] == true) {
770 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
771 				if (!dmub_hpd_wrk) {
772 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
773 					return;
774 				}
775 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
776 				if (!dmub_hpd_wrk->dmub_notify) {
777 					kfree(dmub_hpd_wrk);
778 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
779 					return;
780 				}
781 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
782 				if (dmub_hpd_wrk->dmub_notify)
783 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
784 				dmub_hpd_wrk->adev = adev;
785 				if (notify.type == DMUB_NOTIFICATION_HPD) {
786 					plink = adev->dm.dc->links[notify.link_index];
787 					if (plink) {
788 						plink->hpd_status =
789 							notify.hpd_status == DP_HPD_PLUG;
790 					}
791 				}
792 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
793 			} else {
794 				dm->dmub_callback[notify.type](adev, &notify);
795 			}
796 		} while (notify.pending_notification);
797 	}
798 
799 
800 	do {
801 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
802 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
803 							entry.param0, entry.param1);
804 
805 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
806 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
807 		} else
808 			break;
809 
810 		count++;
811 
812 	} while (count <= DMUB_TRACE_MAX_READ);
813 
814 	if (count > DMUB_TRACE_MAX_READ)
815 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
816 }
817 
/* amd_ip_funcs hook: DM has no clockgating control; always succeeds. */
static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}
823 
/* amd_ip_funcs hook: DM has no powergating control; always succeeds. */
static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}
829 
830 /* Prototypes of private functions */
831 static int dm_early_init(void* handle);
832 
833 /* Allocate memory for FBC compressed data  */
834 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
835 {
836 	struct drm_device *dev = connector->dev;
837 	struct amdgpu_device *adev = drm_to_adev(dev);
838 	struct dm_compressor_info *compressor = &adev->dm.compressor;
839 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
840 	struct drm_display_mode *mode;
841 	unsigned long max_size = 0;
842 
843 	if (adev->dm.dc->fbc_compressor == NULL)
844 		return;
845 
846 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
847 		return;
848 
849 	if (compressor->bo_ptr)
850 		return;
851 
852 
853 	list_for_each_entry(mode, &connector->modes, head) {
854 		if (max_size < mode->htotal * mode->vtotal)
855 			max_size = mode->htotal * mode->vtotal;
856 	}
857 
858 	if (max_size) {
859 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
860 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
861 			    &compressor->gpu_addr, &compressor->cpu_addr);
862 
863 		if (r)
864 			DRM_ERROR("DM: Failed to initialize FBC\n");
865 		else {
866 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
867 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
868 		}
869 
870 	}
871 
872 }
873 
874 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
875 					  int pipe, bool *enabled,
876 					  unsigned char *buf, int max_bytes)
877 {
878 	struct drm_device *dev = dev_get_drvdata(kdev);
879 	struct amdgpu_device *adev = drm_to_adev(dev);
880 	struct drm_connector *connector;
881 	struct drm_connector_list_iter conn_iter;
882 	struct amdgpu_dm_connector *aconnector;
883 	int ret = 0;
884 
885 	*enabled = false;
886 
887 	mutex_lock(&adev->dm.audio_lock);
888 
889 	drm_connector_list_iter_begin(dev, &conn_iter);
890 	drm_for_each_connector_iter(connector, &conn_iter) {
891 		aconnector = to_amdgpu_dm_connector(connector);
892 		if (aconnector->audio_inst != port)
893 			continue;
894 
895 		*enabled = true;
896 		ret = drm_eld_size(connector->eld);
897 		memcpy(buf, connector->eld, min(max_bytes, ret));
898 
899 		break;
900 	}
901 	drm_connector_list_iter_end(&conn_iter);
902 
903 	mutex_unlock(&adev->dm.audio_lock);
904 
905 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
906 
907 	return ret;
908 }
909 
/* Ops exposed to the HDA audio driver; only ELD retrieval is needed. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
913 
914 static int amdgpu_dm_audio_component_bind(struct device *kdev,
915 				       struct device *hda_kdev, void *data)
916 {
917 	struct drm_device *dev = dev_get_drvdata(kdev);
918 	struct amdgpu_device *adev = drm_to_adev(dev);
919 	struct drm_audio_component *acomp = data;
920 
921 	acomp->ops = &amdgpu_dm_audio_component_ops;
922 	acomp->dev = kdev;
923 	adev->dm.audio_component = acomp;
924 
925 	return 0;
926 }
927 
928 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
929 					  struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = NULL;
936 	acomp->dev = NULL;
937 	adev->dm.audio_component = NULL;
938 }
939 
/* Bind/unbind pair registered with the component framework in
 * amdgpu_dm_audio_init() and removed in amdgpu_dm_audio_fini().
 */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};
944 
945 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
946 {
947 	int i, ret;
948 
949 	if (!amdgpu_audio)
950 		return 0;
951 
952 	adev->mode_info.audio.enabled = true;
953 
954 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
955 
956 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
957 		adev->mode_info.audio.pin[i].channels = -1;
958 		adev->mode_info.audio.pin[i].rate = -1;
959 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
960 		adev->mode_info.audio.pin[i].status_bits = 0;
961 		adev->mode_info.audio.pin[i].category_code = 0;
962 		adev->mode_info.audio.pin[i].connected = false;
963 		adev->mode_info.audio.pin[i].id =
964 			adev->dm.dc->res_pool->audios[i]->inst;
965 		adev->mode_info.audio.pin[i].offset = 0;
966 	}
967 
968 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
969 	if (ret < 0)
970 		return ret;
971 
972 	adev->dm.audio_registered = true;
973 
974 	return 0;
975 }
976 
977 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
978 {
979 	if (!amdgpu_audio)
980 		return;
981 
982 	if (!adev->mode_info.audio.enabled)
983 		return;
984 
985 	if (adev->dm.audio_registered) {
986 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
987 		adev->dm.audio_registered = false;
988 	}
989 
990 	/* TODO: Disable audio? */
991 
992 	adev->mode_info.audio.enabled = false;
993 }
994 
995 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
996 {
997 	struct drm_audio_component *acomp = adev->dm.audio_component;
998 
999 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1000 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1001 
1002 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1003 						 pin, -1);
1004 	}
1005 }
1006 
/**
 * dm_dmub_hw_init() - Bring up the DMUB (display microcontroller) service.
 * @adev: amdgpu device pointer
 *
 * Copies the DMUB firmware regions and the VBIOS image into the framebuffer
 * windows described by adev->dm.dmub_fb_info, programs the hardware via
 * dmub_srv_hw_init(), waits for the firmware auto-load, and finally creates
 * the DC-side DMUB server wrapper if it does not exist yet.
 *
 * Return: 0 on success or when DMUB is not supported on this ASIC,
 * negative errno on failure.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction constants start after the PSP signing header. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	/* inst_const_bytes includes the PSP header/footer; exclude both. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	/* Hand every firmware framebuffer window to the DMUB service. */
	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	/* Create the DC-side wrapper once; reused across suspend/resume. */
	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
1149 
1150 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1151 {
1152 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1153 	enum dmub_status status;
1154 	bool init;
1155 
1156 	if (!dmub_srv) {
1157 		/* DMUB isn't supported on the ASIC. */
1158 		return;
1159 	}
1160 
1161 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1162 	if (status != DMUB_STATUS_OK)
1163 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1164 
1165 	if (status == DMUB_STATUS_OK && init) {
1166 		/* Wait for firmware load to finish. */
1167 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1168 		if (status != DMUB_STATUS_OK)
1169 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1170 	} else {
1171 		/* Perform the full hardware initialization. */
1172 		dm_dmub_hw_init(adev);
1173 	}
1174 }
1175 
/*
 * mmhub_read_system_context() - Fill @pa_config from GMC/MMHUB settings.
 * @adev: amdgpu device pointer
 * @pa_config: output physical address space description consumed by DC
 *
 * Collects the system aperture range, the AGP aperture and the GART
 * page-table addresses so DC can program display address translation.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	/* System aperture bounds are kept in 256 KiB units (>> 18). */
	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	/* AGP aperture bounds are kept in 16 MiB units (>> 24). */
	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	/* GART page-table addresses are split into a 4-bit high part and a
	 * 32-bit low part of the 4 KiB page frame number (>> 12).
	 */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	/* Re-expand the shifted register values to byte addresses for DC. */
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;

}
1230 
/**
 * dm_handle_hpd_rx_offload_work() - Deferred handler for HPD RX interrupts.
 * @work: embedded work item of a struct hpd_rx_irq_offload_work
 *
 * Runs on the per-link HPD RX offload workqueue.  Re-detects the sink and,
 * if it is still connected and no GPU reset is in flight, either services
 * an automated-test request or handles DP link loss.  The work item is
 * freed before returning.
 */
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	/* hpd_lock guards the sink re-detection against concurrent HPD work. */
	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_sink(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	/* Sink gone: nothing to service. */
	if (new_connection_type == dc_connection_none)
		goto skip;

	/* Skip while a GPU reset is in progress. */
	if (amdgpu_in_reset(adev))
		goto skip;

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
		dc_link_dp_handle_automated_test(dc_link);
	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		dc_link_dp_handle_link_loss(dc_link);
		/* Clear the in-flight flag so new link-loss work can queue. */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}
1279 
1280 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1281 {
1282 	int max_caps = dc->caps.max_links;
1283 	int i = 0;
1284 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1285 
1286 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1287 
1288 	if (!hpd_rx_offload_wq)
1289 		return NULL;
1290 
1291 
1292 	for (i = 0; i < max_caps; i++) {
1293 		hpd_rx_offload_wq[i].wq =
1294 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1295 
1296 		if (hpd_rx_offload_wq[i].wq == NULL) {
1297 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1298 			return NULL;
1299 		}
1300 
1301 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1302 	}
1303 
1304 	return hpd_rx_offload_wq;
1305 }
1306 
/* PCI identity of a board whose memory stutter mode must be disabled;
 * matched against the device by dm_should_disable_stutter().
 */
struct amdgpu_stutter_quirk {
	u16 chip_vendor;	/* PCI vendor ID */
	u16 chip_device;	/* PCI device ID; 0 terminates the quirk list */
	u16 subsys_vendor;	/* PCI subsystem vendor ID */
	u16 subsys_device;	/* PCI subsystem device ID */
	u8 revision;		/* PCI revision ID */
};
1314 
/* Boards requiring disable_stutter; all-zero entry is the list sentinel. */
static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};
1320 
1321 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1322 {
1323 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1324 
1325 	while (p && p->chip_device != 0) {
1326 		if (pdev->vendor == p->chip_vendor &&
1327 		    pdev->device == p->chip_device &&
1328 		    pdev->subsystem_vendor == p->subsys_vendor &&
1329 		    pdev->subsystem_device == p->subsys_device &&
1330 		    pdev->revision == p->revision) {
1331 			return true;
1332 		}
1333 		++p;
1334 	}
1335 	return false;
1336 }
1337 
/* Systems that need the aux_hpd_discon quirk; consumed by
 * retrieve_dmi_info() via dmi_first_match().
 */
static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{}
};
1359 
1360 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1361 {
1362 	const struct dmi_system_id *dmi_id;
1363 
1364 	dm->aux_hpd_discon_quirk = false;
1365 
1366 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1367 	if (dmi_id) {
1368 		dm->aux_hpd_discon_quirk = true;
1369 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1370 	}
1371 }
1372 
/**
 * amdgpu_dm_init() - Create and initialize the display manager (DM).
 * @adev: amdgpu device pointer
 *
 * Sets up DM locks and IRQ support, creates the Display Core (DC) instance
 * with ASIC/feature-specific flags, initializes DMUB and the hardware,
 * creates the freesync/HDCP/HPD helper objects and registers the DRM-facing
 * device state.  Any failure jumps to the error label, which tears down the
 * partially constructed state via amdgpu_dm_fini().
 *
 * Return: 0 on success, -EINVAL on failure (the specific cause is logged).
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
	spin_lock_init(&adev->dm.vblank_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Decide whether the legacy DMCU must be disabled, based on the
	 * DMCUB firmware version for DCN 2.1.
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	/* Per-ASIC GPU VM (scatter/gather framebuffer) support. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	/* Translate amdgpu_dc_feature_mask module-parameter bits into DC
	 * init flags.
	 */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply amdgpu_dc_debug_mask module-parameter overrides. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
		adev->dm.dc->debug.disable_dsc = true;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
	if (!adev->dm.hpd_rx_offload_wq) {
		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
		goto error;
	}

	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_control_workqueue =
			create_singlethread_workqueue("dm_vblank_control_workqueue");
		if (!adev->dm.vblank_control_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
	}

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	/* Set up the DMUB outbox notification path (AUX replies, HPD). */
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		init_completion(&adev->dm.dmub_aux_transfer_done);
		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
		if (!adev->dm.dmub_notify) {
			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
			goto error;
		}

		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
		if (!adev->dm.delayed_hpd_wq) {
			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
			goto error;
		}

		amdgpu_dm_outbox_init(adev);
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: fail to register dmub aux callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
			goto error;
		}
	}

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
	 * It is expected that DMUB will resend any pending notifications at this point, for
	 * example HPD from DPIA.
	 */
	if (dc_is_dmub_outbox_supported(adev->dm.dc))
		dc_enable_dmub_outbox(adev->dm.dc);

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1664 
/*
 * IP-block early_fini hook: only tear down the audio component here so the
 * HDA driver is unbound before the rest of DM is destroyed.
 */
static int amdgpu_dm_early_fini(void *handle)
{
	struct amdgpu_device *adev = handle;

	amdgpu_dm_audio_fini(adev);

	return 0;
}
1673 
1674 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1675 {
1676 	int i;
1677 
1678 	if (adev->dm.vblank_control_workqueue) {
1679 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1680 		adev->dm.vblank_control_workqueue = NULL;
1681 	}
1682 
1683 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1684 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1685 	}
1686 
1687 	amdgpu_dm_destroy_drm_device(&adev->dm);
1688 
1689 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1690 	if (adev->dm.crc_rd_wrk) {
1691 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1692 		kfree(adev->dm.crc_rd_wrk);
1693 		adev->dm.crc_rd_wrk = NULL;
1694 	}
1695 #endif
1696 #ifdef CONFIG_DRM_AMD_DC_HDCP
1697 	if (adev->dm.hdcp_workqueue) {
1698 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1699 		adev->dm.hdcp_workqueue = NULL;
1700 	}
1701 
1702 	if (adev->dm.dc)
1703 		dc_deinit_callbacks(adev->dm.dc);
1704 #endif
1705 
1706 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1707 
1708 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1709 		kfree(adev->dm.dmub_notify);
1710 		adev->dm.dmub_notify = NULL;
1711 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1712 		adev->dm.delayed_hpd_wq = NULL;
1713 	}
1714 
1715 	if (adev->dm.dmub_bo)
1716 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1717 				      &adev->dm.dmub_bo_gpu_addr,
1718 				      &adev->dm.dmub_bo_cpu_addr);
1719 
1720 	if (adev->dm.hpd_rx_offload_wq) {
1721 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1722 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1723 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1724 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1725 			}
1726 		}
1727 
1728 		kfree(adev->dm.hpd_rx_offload_wq);
1729 		adev->dm.hpd_rx_offload_wq = NULL;
1730 	}
1731 
1732 	/* DC Destroy TODO: Replace destroy DAL */
1733 	if (adev->dm.dc)
1734 		dc_destroy(&adev->dm.dc);
1735 	/*
1736 	 * TODO: pageflip, vlank interrupt
1737 	 *
1738 	 * amdgpu_dm_irq_fini(adev);
1739 	 */
1740 
1741 	if (adev->dm.cgs_device) {
1742 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1743 		adev->dm.cgs_device = NULL;
1744 	}
1745 	if (adev->dm.freesync_module) {
1746 		mod_freesync_destroy(adev->dm.freesync_module);
1747 		adev->dm.freesync_module = NULL;
1748 	}
1749 
1750 	mutex_destroy(&adev->dm.audio_lock);
1751 	mutex_destroy(&adev->dm.dc_lock);
1752 
1753 	return;
1754 }
1755 
/**
 * load_dmcu_fw() - Request DMCU firmware and register it for PSP loading.
 * @adev: amdgpu device pointer
 *
 * Only a subset of ASICs (Raven/Picasso/Raven2 and Navi12) carry a
 * standalone DMCU firmware image; all others return 0 without loading
 * anything.  When firmware is loaded, its ERAM and INTV regions are
 * registered with the PSP firmware loader.
 *
 * Return: 0 on success or when no DMCU firmware is required, negative
 * errno on request/validation failure.
 */
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch(adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	/* No DMCU firmware to load on these ASICs. */
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		/* Picasso and Raven2 share the Raven DMCU image. */
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		/* DCN-based ASICs use the DMUB path (see dm_dmub_sw_init())
		 * instead of a standalone DMCU image.
		 */
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			return 0;
		default:
			break;
		}
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	/* Register the ERAM and INTV sections with the PSP ucode loader. */
	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}
1866 
1867 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1868 {
1869 	struct amdgpu_device *adev = ctx;
1870 
1871 	return dm_read_reg(adev->dm.dc->ctx, address);
1872 }
1873 
1874 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1875 				     uint32_t value)
1876 {
1877 	struct amdgpu_device *adev = ctx;
1878 
1879 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1880 }
1881 
/*
 * dm_dmub_sw_init() - Software-side setup of the DMUB (display microcontroller)
 * service.
 *
 * Selects the per-ASIC DMUB firmware, loads and validates it, registers it
 * for PSP loading when applicable, creates the dmub_srv instance, and
 * allocates the VRAM buffer backing the service's memory regions.
 *
 * Returns 0 on success and also for ASICs with no DMUB support; firmware
 * load/validate failures likewise return 0, i.e. DMUB is treated as
 * optional at this stage. Negative errno only for allocation/creation
 * failures. Partially-initialized state on the error paths is torn down
 * by dm_sw_fini().
 */
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	/* Map the DCE IP version to a DMUB ASIC id and a firmware filename. */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		/* Green Sardine shares the DCN 2.1 IP but ships its own firmware. */
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case IP_VERSION(3, 0, 0):
		/* DCN 3.0 is shared; the GC version distinguishes the two boards. */
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		} else {
			dmub_asic = DMUB_ASIC_DCN30;
			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		}
		break;
	case IP_VERSION(3, 0, 1):
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case IP_VERSION(3, 0, 2):
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	case IP_VERSION(3, 0, 3):
		dmub_asic = DMUB_ASIC_DCN303;
		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
		break;
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
		/* Yellow Carp B0 silicon needs the DCN31B variant. */
		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
		break;
	case IP_VERSION(3, 1, 4):
		dmub_asic = DMUB_ASIC_DCN314;
		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
		break;
	case IP_VERSION(3, 1, 5):
		dmub_asic = DMUB_ASIC_DCN315;
		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
		break;
	case IP_VERSION(3, 1, 6):
		dmub_asic = DMUB_ASIC_DCN316;
		fw_name_dmub = FIRMWARE_DCN316_DMUB;
		break;
	case IP_VERSION(3, 2, 0):
		dmub_asic = DMUB_ASIC_DCN32;
		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
		break;
	case IP_VERSION(3, 2, 1):
		dmub_asic = DMUB_ASIC_DCN321;
		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	/* Note: fails soft — missing/invalid firmware is not fatal here. */
	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	/* When the PSP loads firmware, register the DMCUB image with it. */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}


	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	/* The PSP header/footer wrap the instruction image; exclude them. */
	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	/* bss/data follows the instruction image inside the firmware blob. */
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}
2062 
/*
 * dm_sw_init() - IP-block sw_init hook: bring up the DMUB service, then
 * load the (optional) DMCU firmware.
 */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = dm_dmub_sw_init(adev);

	if (r)
		return r;

	return load_dmcu_fw(adev);
}
2074 
2075 static int dm_sw_fini(void *handle)
2076 {
2077 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2078 
2079 	kfree(adev->dm.dmub_fb_info);
2080 	adev->dm.dmub_fb_info = NULL;
2081 
2082 	if (adev->dm.dmub_srv) {
2083 		dmub_srv_destroy(adev->dm.dmub_srv);
2084 		adev->dm.dmub_srv = NULL;
2085 	}
2086 
2087 	release_firmware(adev->dm.dmub_fw);
2088 	adev->dm.dmub_fw = NULL;
2089 
2090 	release_firmware(adev->dm.fw_dmcu);
2091 	adev->dm.fw_dmcu = NULL;
2092 
2093 	return 0;
2094 }
2095 
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management on
 * every connector whose DC link was detected as an MST branch.
 *
 * Returns 0 on success, or the negative error from
 * drm_dp_mst_topology_mgr_set_mst() for the first connector that failed;
 * that connector's link is downgraded to dc_connection_single and iteration
 * stops.
 */
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		/* Only MST branches with a usable AUX channel get a manager. */
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				/* Fall back to treating this link as SST. */
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
2125 
/*
 * dm_late_init() - IP-block late_init hook: program the ABM (Adaptive
 * Backlight Management) parameters into the DMCU iram — or, when ABM runs
 * on DMCUB instead, into the DMCUB config for each eDP panel — and then
 * kick off MST topology detection on all connectors.
 *
 * Returns -EINVAL if the ABM programming fails, otherwise the result of
 * detect_mst_link_for_all_connectors().
 */
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;

	dmcu = adev->dm.dc->res_pool->dmcu;

	/* Linear backlight LUT: 16 evenly spaced points spanning 0..0xFFFF. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_override = false;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction,  Don't allow below 1%
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;
	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu) {
		if (!dmcu_load_iram(dmcu, params))
			return -EINVAL;
	} else if (adev->dm.dc->ctx->dmub_srv) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		int edp_num;

		/* Program the ABM config for every eDP panel individually. */
		get_edp_links(adev->dm.dc, edp_links, &edp_num);
		for (i = 0; i < edp_num; i++) {
			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
				return -EINVAL;
		}
	}

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
2171 
/*
 * s3_handle_mst() - Suspend or resume MST topology managers across S3.
 * @dev:     DRM device whose connectors are walked.
 * @suspend: true to suspend the managers, false to resume them.
 *
 * Only top-level MST branch connectors are handled; connectors that are
 * themselves MST ports (aconnector->mst_port set) are managed through
 * their parent and skipped. If a manager fails to resume, its topology
 * is stopped and a single hotplug event is sent afterwards so userspace
 * re-probes the affected outputs.
 */
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			/* sync = true: block until the topology probe settles. */
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
					aconnector->dc_link);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	/* One event covers all failed managers; sent outside the iterator. */
	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}
2206 
2207 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2208 {
2209 	int ret = 0;
2210 
2211 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2212 	 * on window driver dc implementation.
2213 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2214 	 * should be passed to smu during boot up and resume from s3.
2215 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2216 	 * dcn20_resource_construct
2217 	 * then call pplib functions below to pass the settings to smu:
2218 	 * smu_set_watermarks_for_clock_ranges
2219 	 * smu_set_watermarks_table
2220 	 * navi10_set_watermarks_table
2221 	 * smu_write_watermarks_table
2222 	 *
2223 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2224 	 * dc has implemented different flow for window driver:
2225 	 * dc_hardware_init / dc_set_power_state
2226 	 * dcn10_init_hw
2227 	 * notify_wm_ranges
2228 	 * set_wm_ranges
2229 	 * -- Linux
2230 	 * smu_set_watermarks_for_clock_ranges
2231 	 * renoir_set_watermarks_table
2232 	 * smu_write_watermarks_table
2233 	 *
2234 	 * For Linux,
2235 	 * dc_hardware_init -> amdgpu_dm_init
2236 	 * dc_set_power_state --> dm_resume
2237 	 *
2238 	 * therefore, this function apply to navi10/12/14 but not Renoir
2239 	 * *
2240 	 */
2241 	switch (adev->ip_versions[DCE_HWIP][0]) {
2242 	case IP_VERSION(2, 0, 2):
2243 	case IP_VERSION(2, 0, 0):
2244 		break;
2245 	default:
2246 		return 0;
2247 	}
2248 
2249 	ret = amdgpu_dpm_write_watermarks_table(adev);
2250 	if (ret) {
2251 		DRM_ERROR("Failed to update WMTABLE!\n");
2252 		return ret;
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 /**
2259  * dm_hw_init() - Initialize DC device
2260  * @handle: The base driver device containing the amdgpu_dm device.
2261  *
2262  * Initialize the &struct amdgpu_display_manager device. This involves calling
2263  * the initializers of each DM component, then populating the struct with them.
2264  *
2265  * Although the function implies hardware initialization, both hardware and
2266  * software are initialized here. Splitting them out to their relevant init
2267  * hooks is a future TODO item.
2268  *
2269  * Some notable things that are initialized here:
2270  *
2271  * - Display Core, both software and hardware
2272  * - DC modules that we need (freesync and color management)
2273  * - DRM software states
2274  * - Interrupt sources and handlers
2275  * - Vblank support
2276  * - Debug FS entries, if enabled
2277  */
2278 static int dm_hw_init(void *handle)
2279 {
2280 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2281 	/* Create DAL display manager */
2282 	amdgpu_dm_init(adev);
2283 	amdgpu_dm_hpd_init(adev);
2284 
2285 	return 0;
2286 }
2287 
2288 /**
2289  * dm_hw_fini() - Teardown DC device
2290  * @handle: The base driver device containing the amdgpu_dm device.
2291  *
2292  * Teardown components within &struct amdgpu_display_manager that require
2293  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2294  * were loaded. Also flush IRQ workqueues and disable them.
2295  */
2296 static int dm_hw_fini(void *handle)
2297 {
2298 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2299 
2300 	amdgpu_dm_hpd_fini(adev);
2301 
2302 	amdgpu_dm_irq_fini(adev);
2303 	amdgpu_dm_fini(adev);
2304 	return 0;
2305 }
2306 
2307 
2308 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2309 				 struct dc_state *state, bool enable)
2310 {
2311 	enum dc_irq_source irq_source;
2312 	struct amdgpu_crtc *acrtc;
2313 	int rc = -EBUSY;
2314 	int i = 0;
2315 
2316 	for (i = 0; i < state->stream_count; i++) {
2317 		acrtc = get_crtc_by_otg_inst(
2318 				adev, state->stream_status[i].primary_otg_inst);
2319 
2320 		if (acrtc && state->stream_status[i].plane_count != 0) {
2321 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2322 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2323 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2324 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2325 			if (rc)
2326 				DRM_WARN("Failed to %s pflip interrupts\n",
2327 					 enable ? "enable" : "disable");
2328 
2329 			if (enable) {
2330 				rc = dm_enable_vblank(&acrtc->base);
2331 				if (rc)
2332 					DRM_WARN("Failed to enable vblank interrupts\n");
2333 			} else {
2334 				dm_disable_vblank(&acrtc->base);
2335 			}
2336 
2337 		}
2338 	}
2339 
2340 }
2341 
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with all streams (and
 * their planes) removed, blanking the pipes. Used on the suspend side of GPU
 * reset; the previous state is expected to have been cached by the caller.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if the state could not be
 * allocated, or the first DC error hit while detaching surfaces/streams.
 */
static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
{
	struct dc_state *context = NULL;
	enum dc_status res = DC_ERROR_UNEXPECTED;
	int i;
	struct dc_stream_state *del_streams[MAX_PIPES];
	int del_streams_count = 0;

	memset(del_streams, 0, sizeof(del_streams));

	context = dc_create_state(dc);
	if (context == NULL)
		goto context_alloc_fail;

	/* Start from a copy of the current state, then strip it down. */
	dc_resource_state_copy_construct_current(dc, context);

	/* First remove from context all streams */
	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		del_streams[del_streams_count++] = stream;
	}

	/* Remove all planes for removed streams and then remove the streams */
	for (i = 0; i < del_streams_count; i++) {
		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
			res = DC_FAIL_DETACH_SURFACES;
			goto fail;
		}

		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
		if (res != DC_OK)
			goto fail;
	}

	res = dc_commit_state(dc, context);

fail:
	/* The state is refcounted; release our construction reference. */
	dc_release_state(context);

context_alloc_fail:
	return res;
}
2385 
2386 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2387 {
2388 	int i;
2389 
2390 	if (dm->hpd_rx_offload_wq) {
2391 		for (i = 0; i < dm->dc->caps.max_links; i++)
2392 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2393 	}
2394 }
2395 
/*
 * dm_suspend() - IP-block suspend hook.
 *
 * Two paths:
 *  - GPU reset (amdgpu_in_reset): cache the current DC state, disable the
 *    per-stream interrupts, commit zero streams and quiesce IRQ/HPD work.
 *    NOTE: dc_lock is intentionally left held here; the matching
 *    mutex_unlock() happens in dm_resume()'s reset path.
 *  - Regular S3: save the atomic state via the DRM helper, suspend MST,
 *    IRQs and HPD work, then put DC into D3.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		mutex_lock(&dm->dc_lock);

		dc_allow_idle_optimizations(adev->dm.dc, false);

		/* Snapshot for dm_resume() to restore after the reset. */
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		hpd_rx_irq_work_suspend(dm);

		/* Returns with dc_lock still held — see dm_resume(). */
		return ret;
	}

	/* A leftover cached_state means a previous resume never consumed it. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);

	hpd_rx_irq_work_suspend(dm);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
2433 
2434 struct amdgpu_dm_connector *
2435 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2436 					     struct drm_crtc *crtc)
2437 {
2438 	uint32_t i;
2439 	struct drm_connector_state *new_con_state;
2440 	struct drm_connector *connector;
2441 	struct drm_crtc *crtc_from_state;
2442 
2443 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2444 		crtc_from_state = new_con_state->crtc;
2445 
2446 		if (crtc_from_state == crtc)
2447 			return to_amdgpu_dm_connector(connector);
2448 	}
2449 
2450 	return NULL;
2451 }
2452 
/*
 * emulated_link_detect() - Fake a sink on @link for forced/headless
 * connectors (e.g. connector status forced by the user while nothing is
 * physically attached).
 *
 * Marks the link as dc_connection_none, drops the previous local sink's
 * reference, creates a new emulated sink matching the connector's signal
 * type and attempts to read a (forced) EDID into it.
 */
static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	/* Drop the reference the link held on its old sink before replacing it. */
	if (prev_sink)
		dc_sink_release(prev_sink);

	/* Map the connector's signal to a DDC transaction type + sink signal. */
	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		/* NOTE(review): DP is mapped to SIGNAL_TYPE_VIRTUAL here —
		 * presumably so no real DP link training is attempted for an
		 * emulated sink; confirm against DC link handling.
		 */
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			link->connector_signal);
		return;
	}

	sink_init_data.link = link;
	sink_init_data.sink_signal = sink_caps.signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DC_ERROR("Failed to create sink!\n");
		return;
	}

	/* dc_sink_create returns a new reference */
	link->local_sink = sink;

	edid_status = dm_helpers_read_local_edid(
			link->ctx,
			link,
			sink);

	/* EDID failure is non-fatal: the emulated sink stays attached. */
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID");

}
2534 
2535 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2536 				     struct amdgpu_display_manager *dm)
2537 {
2538 	struct {
2539 		struct dc_surface_update surface_updates[MAX_SURFACES];
2540 		struct dc_plane_info plane_infos[MAX_SURFACES];
2541 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2542 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2543 		struct dc_stream_update stream_update;
2544 	} * bundle;
2545 	int k, m;
2546 
2547 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2548 
2549 	if (!bundle) {
2550 		dm_error("Failed to allocate update bundle\n");
2551 		goto cleanup;
2552 	}
2553 
2554 	for (k = 0; k < dc_state->stream_count; k++) {
2555 		bundle->stream_update.stream = dc_state->streams[k];
2556 
2557 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2558 			bundle->surface_updates[m].surface =
2559 				dc_state->stream_status->plane_states[m];
2560 			bundle->surface_updates[m].surface->force_full_update =
2561 				true;
2562 		}
2563 		dc_commit_updates_for_stream(
2564 			dm->dc, bundle->surface_updates,
2565 			dc_state->stream_status->plane_count,
2566 			dc_state->streams[k], &bundle->stream_update, dc_state);
2567 	}
2568 
2569 cleanup:
2570 	kfree(bundle);
2571 
2572 	return;
2573 }
2574 
/*
 * dm_resume() - IP-block resume hook, mirroring dm_suspend().
 *
 * GPU-reset path: restore the DC state cached by dm_suspend(), re-init
 * DMUB, power DC back up, force full updates on all cached planes and
 * re-enable interrupts. This path also releases the dc_lock that
 * dm_suspend() left held.
 *
 * Regular S3 path: rebuild the DC resource state (DC invalidates it in
 * D3), re-init DMUB, power up, re-enable MST, redo sink detection on every
 * non-MST connector and finally replay the cached atomic state.
 */
static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct drm_device *ddev = adev_to_drm(adev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct dc_state *dc_state;
	int i, r, j;

	if (amdgpu_in_reset(adev)) {
		dc_state = dm->cached_dc_state;

		/*
		 * The dc->current_state is backed up into dm->cached_dc_state
		 * before we commit 0 streams.
		 *
		 * DC will clear link encoder assignments on the real state
		 * but the changes won't propagate over to the copy we made
		 * before the 0 streams commit.
		 *
		 * DC expects that link encoder assignments are *not* valid
		 * when committing a state, so as a workaround we can copy
		 * off of the current state.
		 *
		 * We lose the previous assignments, but we had already
		 * commit 0 streams anyway.
		 */
		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);

		r = dm_dmub_hw_init(adev);
		if (r)
			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);

		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
		dc_resume(dm->dc);

		amdgpu_dm_irq_resume_early(adev);

		/* Force every cached stream/plane to be fully reprogrammed. */
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}

		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
			amdgpu_dm_outbox_init(adev);
			dc_enable_dmub_outbox(adev->dm.dc);
		}

		WARN_ON(!dc_commit_state(dm->dc, dc_state));

		dm_gpureset_commit_state(dm->cached_dc_state, dm);

		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);

		dc_release_state(dm->cached_dc_state);
		dm->cached_dc_state = NULL;

		amdgpu_dm_irq_resume_late(adev);

		/* Pairs with the mutex_lock() in dm_suspend()'s reset path. */
		mutex_unlock(&dm->dc_lock);

		return 0;
	}
	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
	dc_release_state(dm_state->context);
	dm_state->context = dc_create_state(dm->dc);
	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
	dc_resource_state_construct(dm->dc, dm_state->context);

	/* Before powering on DC we need to re-initialize DMUB. */
	dm_dmub_hw_resume(adev);

	/* Re-enable outbox interrupts for DPIA. */
	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
		amdgpu_dm_outbox_init(adev);
		dc_enable_dmub_outbox(adev->dm.dc);
	}

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	/* program HPD filter */
	dc_resume(dm->dc);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* On resume we need to rewrite the MSTM control bits to enable MST*/
	s3_handle_mst(ddev, false);

	/* Do detection*/
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->dc_link &&
		    aconnector->dc_link->type == dc_connection_mst_branch)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced-on connector with nothing attached: emulate a sink. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(aconnector->dc_link);
		} else {
			mutex_lock(&dm->dc_lock);
			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
			mutex_unlock(&dm->dc_lock);
		}

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		if (aconnector->dc_sink)
			dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}
	drm_connector_list_iter_end(&iter);

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}
2754 
2755 /**
2756  * DOC: DM Lifecycle
2757  *
2758  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2759  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2760  * the base driver's device list to be initialized and torn down accordingly.
2761  *
2762  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2763  */
2764 
/* IP-block function table wiring the DM lifecycle hooks into the amdgpu
 * base driver (see the "DM Lifecycle" DOC comment above).
 */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.early_fini = amdgpu_dm_early_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2783 
/* Exported IP-block descriptor registering DM as the DCE block (v1.0.0). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2792 
2793 
2794 /**
2795  * DOC: atomic
2796  *
2797  * *WIP*
2798  */
2799 
/* DRM mode-config callbacks: framebuffer creation/format lookup go through
 * amdgpu display helpers; atomic check is DM's, commit is the DRM helper.
 */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2807 
/* Route the atomic commit tail through DM's implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2811 
/*
 * update_connector_ext_caps() - Refresh the eDP backlight capabilities for
 * @aconnector from its DPCD extended caps and HDR sink metadata.
 *
 * Only acts on eDP links that are registered as one of the DM's backlight
 * links. Decides whether AUX backlight control is used (OLED panels, or
 * forced via the amdgpu_backlight module parameter) and derives the
 * min/max AUX input signal from the sink's max_fall/min_cll values.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_avg, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	/* round(50 * 2**(r/32)) for r in 0..31 — see derivation below. */
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
	int i;

	if (!aconnector || !aconnector->dc_link)
		return;

	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	/* Find which backlight slot (if any) this link is registered under. */
	for (i = 0; i < dm->num_of_edps; i++) {
		if (link == dm->backlight_link[i])
			break;
	}
	if (i >= dm->num_of_edps)
		return;
	caps = &dm->backlight_caps[i];
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	if (caps->ext_caps->bits.oled == 1 /*||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
		caps->aux_support = true;

	/* Module parameter overrides: 0 forces PWM, 1 forces AUX. */
	if (amdgpu_backlight == 0)
		caps->aux_support = false;
	else if (amdgpu_backlight == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_avg >> 5;
	r = max_avg % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/* NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) in integer math
	 * collapses to 0 or 1 for any min_cll, losing the (CV/255)^2
	 * fraction — confirm whether this coarse rounding is intended.
	 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2883 
/*
 * Propagate the result of a link detection to the drm connector: swap
 * aconnector->dc_sink for the link's current local sink (taking/dropping
 * dc_sink references), refresh the EDID property and freesync caps, and
 * keep the DP CEC adapter's EDID in sync for aux-mode links.
 *
 * Holds dev->mode_config.mutex around the connector state updates.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	/* Hold a reference on the link's sink for the duration of this call. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			/* No physical sink: fall back to the emulated sink. */
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			dc_sink_release(aconnector->dc_sink);
		}

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* Sink supplied no EDID; also drop the CEC EDID on aux links. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnect: clear EDID/freesync state and drop our sink ref. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	/* Drop the reference taken at the top of this function. */
	if (sink)
		dc_sink_release(sink);
}
3025 
/*
 * Handle an HPD (hot-plug detect) interrupt for one connector: re-detect
 * the link (or emulate detection for forced connectors), update the drm
 * connector state and notify userspace of the hotplug.
 *
 * Serialized against other HPD handling via aconnector->hpd_lock.
 */
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
	bool ret = false;

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Reset HDCP state for this link and request re-evaluation on the next commit. */
	if (adev->dm.hdcp_workqueue) {
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
		dm_con_state->update_hdcp = true;
	}
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		/* Forced connector with nothing attached: emulate the link detect. */
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_connector_hotplug_event(connector);
	} else {
		mutex_lock(&adev->dm.dc_lock);
		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
		mutex_unlock(&adev->dm.dc_lock);
		if (ret) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
				drm_kms_helper_connector_hotplug_event(connector);
		}
	}
	mutex_unlock(&aconnector->hpd_lock);

}
3085 
/* IRQ callback registered for HPD sources; param is the connector. */
static void handle_hpd_irq(void *param)
{
	handle_hpd_irq_helper((struct amdgpu_dm_connector *)param);
}
3093 
/*
 * Drain pending MST sideband messages for this connector: read the ESI
 * (or legacy sink-count) DPCD block, hand it to the drm MST manager, ACK
 * the serviced IRQ bits back to the sink, and repeat while new IRQs keep
 * arriving (bounded by max_process_count).
 */
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	/* Pre-DP1.2 sinks expose the IRQ vector at the legacy address. */
	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	/* Keep servicing while the full block reads back and IRQs remain. */
	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* The ACK write is retried up to 3 times. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
3171 
3172 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3173 							union hpd_irq_data hpd_irq_data)
3174 {
3175 	struct hpd_rx_irq_offload_work *offload_work =
3176 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3177 
3178 	if (!offload_work) {
3179 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3180 		return;
3181 	}
3182 
3183 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3184 	offload_work->data = hpd_irq_data;
3185 	offload_work->offload_wq = offload_wq;
3186 
3187 	queue_work(offload_wq->wq, &offload_work->work);
3188 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3189 }
3190 
/*
 * Handle a DP short-pulse (HPD_RX) interrupt: let DC service the IRQ
 * inline, push heavyweight work (automated test, MST sideband, link loss)
 * to the offload workqueue, and re-run connector detection when a
 * downstream port status change was reported for a non-MST-root connector.
 */
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	bool result = false;
	enum dc_connection_type new_connection_type = dc_connection_none;
	struct amdgpu_device *adev = drm_to_adev(dev);
	union hpd_irq_data hpd_irq_data;
	bool link_loss = false;
	bool has_left_work = false;
	int idx = aconnector->base.index;
	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

	if (adev->dm.disable_hpd_irq)
		return;

	/*
	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
	 * conflict, after implement i2c helper, this mutex should be
	 * retired.
	 */
	mutex_lock(&aconnector->hpd_lock);

	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
						&link_loss, true, &has_left_work);

	/* DC fully handled the IRQ; skip straight to the detection check. */
	if (!has_left_work)
		goto out;

	/* Automated-test requests are processed entirely in the offload worker. */
	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
		goto out;
	}

	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
			dm_handle_mst_sideband_msg(aconnector);
			goto out;
		}

		if (link_loss) {
			bool skip = false;

			/* Only one link-loss recovery may be in flight per connector. */
			spin_lock(&offload_wq->offload_lock);
			skip = offload_wq->is_handling_link_loss;

			if (!skip)
				offload_wq->is_handling_link_loss = true;

			spin_unlock(&offload_wq->offload_lock);

			if (!skip)
				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);

			goto out;
		}
	}

out:
	if (result && !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			/* Forced connector with nothing attached: emulate the detect. */
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);


			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_connector_hotplug_event(connector);
		} else {
			bool ret = false;

			mutex_lock(&adev->dm.dc_lock);
			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
			mutex_unlock(&adev->dm.dc_lock);

			if (ret) {
				if (aconnector->fake_enable)
					aconnector->fake_enable = false;

				amdgpu_dm_update_connector_after_detect(aconnector);

				drm_modeset_lock_all(dev);
				dm_restore_drm_connector_state(dev, connector);
				drm_modeset_unlock_all(dev);

				drm_kms_helper_connector_hotplug_event(connector);
			}
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* Forward content-protection IRQs to the HDCP worker. */
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
	}
#endif

	if (dc_link->type != dc_connection_mst_branch)
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);

	mutex_unlock(&aconnector->hpd_lock);
}
3308 
/*
 * Walk every drm connector and register low-context IRQ handlers for its
 * HPD and HPD_RX (DP short pulse) interrupt sources with the dm IRQ
 * manager; also bind the connector to its hpd_rx offload queue slot.
 */
static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head)	{

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		/* Long-pulse HPD (connect/disconnect). */
		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source =	dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);

			if (adev->dm.hpd_rx_offload_wq)
				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
					aconnector;
		}
	}
}
3351 
#if defined(CONFIG_DRM_AMD_DC_SI)
/*
 * Register IRQ sources and initialize IRQ callbacks for DCE 6.x (SI):
 * per-CRTC VBLANK, per-pipe GRPH_PFLIP and the shared HPD interrupt.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		/* NOTE: DCE6 vblank src ids are 1-based, hence i+1. */
		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i+1 , 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
3434 
/*
 * Register IRQ sources and initialize IRQ callbacks for DCE 8.x-12.x:
 * per-CRTC VBLANK, VUPDATE, per-pipe GRPH_PFLIP and the shared HPD
 * interrupt. Returns 0 on success or the amdgpu_irq_add_id() error.
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	/* Vega and later route display interrupts through the SOC15 DCE client. */
	if (adev->family >= AMDGPU_FAMILY_AI)
		client_id = SOC15_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
3539 
/*
 * Register IRQ sources and initialize IRQ callbacks for DCN hardware:
 * per-OTG VSTARTUP (used as vblank), optional vertical-line-0 (secure
 * display), VUPDATE_NO_LOCK, per-HUBP flip and the shared HPD interrupt.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	static const unsigned int vrtl_int_srcid[] = {
		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
	};
#endif

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
				vrtl_int_srcid[i], &adev->vline0_irq);

		if (r) {
			DRM_ERROR("Failed to add vline0 irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);

		/* Unlike the loops above, an unmapped source here only stops
		 * vline0 registration rather than failing the whole init. */
		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
			break;
		}

		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
					- DC_IRQ_SOURCE_DC1_VLINE0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
	}
#endif

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
	     i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
/*
 * Register Outbox IRQ sources and initialize IRQ callbacks for the DMUB
 * outbox1 low-priority-ready interrupt. The handler is only wired up when
 * a DMUB service exists on this ASIC. Returns 0 on success or the
 * amdgpu_irq_add_id() error.
 */
static int register_outbox_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r, i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
			&adev->dmub_outbox_irq);
	if (r) {
		DRM_ERROR("Failed to add outbox irq id!\n");
		return r;
	}

	if (dc->ctx->dmub_srv) {
		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
		/* Outbox notifications are handled in low (process) context. */
		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source =
		dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.dmub_outbox_params[0];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_dmub_outbox1_low_irq, c_irq_params);
	}

	return 0;
}
3728 
3729 /*
3730  * Acquires the lock for the atomic state object and returns
3731  * the new atomic state.
3732  *
3733  * This should only be called during atomic check.
3734  */
3735 int dm_atomic_get_state(struct drm_atomic_state *state,
3736 			struct dm_atomic_state **dm_state)
3737 {
3738 	struct drm_device *dev = state->dev;
3739 	struct amdgpu_device *adev = drm_to_adev(dev);
3740 	struct amdgpu_display_manager *dm = &adev->dm;
3741 	struct drm_private_state *priv_state;
3742 
3743 	if (*dm_state)
3744 		return 0;
3745 
3746 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3747 	if (IS_ERR(priv_state))
3748 		return PTR_ERR(priv_state);
3749 
3750 	*dm_state = to_dm_atomic_state(priv_state);
3751 
3752 	return 0;
3753 }
3754 
3755 static struct dm_atomic_state *
3756 dm_atomic_get_new_state(struct drm_atomic_state *state)
3757 {
3758 	struct drm_device *dev = state->dev;
3759 	struct amdgpu_device *adev = drm_to_adev(dev);
3760 	struct amdgpu_display_manager *dm = &adev->dm;
3761 	struct drm_private_obj *obj;
3762 	struct drm_private_state *new_obj_state;
3763 	int i;
3764 
3765 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3766 		if (obj->funcs == dm->atomic_obj.funcs)
3767 			return to_dm_atomic_state(new_obj_state);
3768 	}
3769 
3770 	return NULL;
3771 }
3772 
3773 static struct drm_private_state *
3774 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3775 {
3776 	struct dm_atomic_state *old_state, *new_state;
3777 
3778 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3779 	if (!new_state)
3780 		return NULL;
3781 
3782 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3783 
3784 	old_state = to_dm_atomic_state(obj->state);
3785 
3786 	if (old_state && old_state->context)
3787 		new_state->context = dc_copy_state(old_state->context);
3788 
3789 	if (!new_state->context) {
3790 		kfree(new_state);
3791 		return NULL;
3792 	}
3793 
3794 	return &new_state->base;
3795 }
3796 
3797 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3798 				    struct drm_private_state *state)
3799 {
3800 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3801 
3802 	if (dm_state && dm_state->context)
3803 		dc_release_state(dm_state->context);
3804 
3805 	kfree(dm_state);
3806 }
3807 
3808 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3809 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3810 	.atomic_destroy_state = dm_atomic_destroy_state,
3811 };
3812 
3813 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3814 {
3815 	struct dm_atomic_state *state;
3816 	int r;
3817 
3818 	adev->mode_info.mode_config_initialized = true;
3819 
3820 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3821 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3822 
3823 	adev_to_drm(adev)->mode_config.max_width = 16384;
3824 	adev_to_drm(adev)->mode_config.max_height = 16384;
3825 
3826 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3827 	if (adev->asic_type == CHIP_HAWAII)
3828 		/* disable prefer shadow for now due to hibernation issues */
3829 		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3830 	else
3831 		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3832 	/* indicates support for immediate flip */
3833 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3834 
3835 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3836 
3837 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3838 	if (!state)
3839 		return -ENOMEM;
3840 
3841 	state->context = dc_create_state(adev->dm.dc);
3842 	if (!state->context) {
3843 		kfree(state);
3844 		return -ENOMEM;
3845 	}
3846 
3847 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3848 
3849 	drm_atomic_private_obj_init(adev_to_drm(adev),
3850 				    &adev->dm.atomic_obj,
3851 				    &state->base,
3852 				    &dm_atomic_state_funcs);
3853 
3854 	r = amdgpu_display_modeset_create_props(adev);
3855 	if (r) {
3856 		dc_release_state(state->context);
3857 		kfree(state);
3858 		return r;
3859 	}
3860 
3861 	r = amdgpu_dm_audio_init(adev);
3862 	if (r) {
3863 		dc_release_state(state->context);
3864 		kfree(state);
3865 		return r;
3866 	}
3867 
3868 	return 0;
3869 }
3870 
3871 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3872 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3873 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3874 
/*
 * amdgpu_dm_update_backlight_caps() - cache brightness limits for one eDP
 * @dm: display manager
 * @bl_idx: index of the backlight slot to fill
 *
 * Fills dm->backlight_caps[bl_idx] once: from ACPI when the platform
 * provides valid caps, otherwise from the driver defaults. For AUX-driven
 * panels the min/max input-signal fields are deliberately left untouched
 * here (NOTE(review): presumably the AUX limits arrive elsewhere in nits —
 * confirm against the connector ext-caps path).
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
					    int bl_idx)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	memset(&caps, 0, sizeof(caps));

	/* Caps are cached; only query ACPI the first time through. */
	if (dm->backlight_caps[bl_idx].caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(&caps);
	if (caps.caps_valid) {
		dm->backlight_caps[bl_idx].caps_valid = true;
		/* AUX panels do not use the PWM input-signal range. */
		if (caps.aux_support)
			return;
		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
	} else {
		/* No ACPI caps: fall back to driver defaults. */
		dm->backlight_caps[bl_idx].min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps[bl_idx].max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	if (dm->backlight_caps[bl_idx].aux_support)
		return;

	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3907 
3908 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3909 				unsigned *min, unsigned *max)
3910 {
3911 	if (!caps)
3912 		return 0;
3913 
3914 	if (caps->aux_support) {
3915 		// Firmware limits are in nits, DC API wants millinits.
3916 		*max = 1000 * caps->aux_max_input_signal;
3917 		*min = 1000 * caps->aux_min_input_signal;
3918 	} else {
3919 		// Firmware limits are 8-bit, PWM control is 16-bit.
3920 		*max = 0x101 * caps->max_input_signal;
3921 		*min = 0x101 * caps->min_input_signal;
3922 	}
3923 	return 1;
3924 }
3925 
3926 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3927 					uint32_t brightness)
3928 {
3929 	unsigned min, max;
3930 
3931 	if (!get_brightness_range(caps, &min, &max))
3932 		return brightness;
3933 
3934 	// Rescale 0..255 to min..max
3935 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3936 				       AMDGPU_MAX_BL_LEVEL);
3937 }
3938 
3939 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3940 				      uint32_t brightness)
3941 {
3942 	unsigned min, max;
3943 
3944 	if (!get_brightness_range(caps, &min, &max))
3945 		return brightness;
3946 
3947 	if (brightness < min)
3948 		return 0;
3949 	// Rescale min..max to 0..255
3950 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3951 				 max - min);
3952 }
3953 
/*
 * amdgpu_dm_backlight_set_level() - program one eDP backlight
 * @dm: display manager
 * @bl_idx: index into dm->backlight_* arrays for the target eDP
 * @user_brightness: brightness on the 0..AMDGPU_MAX_BL_LEVEL user scale
 *
 * Converts the user-scale value into the firmware range and pushes it to
 * DC — via AUX (millinits) when the panel supports it, otherwise via the
 * PWM path. dm->actual_brightness is only updated when DC accepted the
 * new level, so it tracks what the panel is really showing.
 */
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
					 int bl_idx,
					 u32 user_brightness)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link;
	u32 brightness;
	bool rc;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	/* Remember the requested level even if the HW write fails below. */
	dm->brightness[bl_idx] = user_brightness;
	/* update scratch register */
	if (bl_idx == 0)
		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
	link = (struct dc_link *)dm->backlight_link[bl_idx];

	/* Change brightness based on AUX property */
	if (caps.aux_support) {
		rc = dc_link_set_backlight_level_nits(link, true, brightness,
						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
	} else {
		rc = dc_link_set_backlight_level(link, brightness, 0);
		if (!rc)
			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
	}

	if (rc)
		dm->actual_brightness[bl_idx] = user_brightness;
}
3988 
3989 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3990 {
3991 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3992 	int i;
3993 
3994 	for (i = 0; i < dm->num_of_edps; i++) {
3995 		if (bd == dm->backlight_dev[i])
3996 			break;
3997 	}
3998 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3999 		i = 0;
4000 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4001 
4002 	return 0;
4003 }
4004 
/*
 * amdgpu_dm_backlight_get_level() - read back one eDP's brightness
 * @dm: display manager
 * @bl_idx: index of the eDP backlight to query
 *
 * Queries DC for the current level (average nits over AUX, raw PWM level
 * otherwise) and converts it to the 0..AMDGPU_MAX_BL_LEVEL user scale.
 * Falls back to the last cached dm->brightness value when DC cannot
 * report a level.
 */
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
					 int bl_idx)
{
	struct amdgpu_dm_backlight_caps caps;
	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	caps = dm->backlight_caps[bl_idx];

	if (caps.aux_support) {
		u32 avg, peak;
		bool rc;

		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
		if (!rc)
			return dm->brightness[bl_idx];
		/* Report the average, not peak, nits as the user level. */
		return convert_brightness_to_user(&caps, avg);
	} else {
		int ret = dc_link_get_backlight_level(link);

		if (ret == DC_ERROR_UNEXPECTED)
			return dm->brightness[bl_idx];
		return convert_brightness_to_user(&caps, ret);
	}
}
4030 
4031 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4032 {
4033 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4034 	int i;
4035 
4036 	for (i = 0; i < dm->num_of_edps; i++) {
4037 		if (bd == dm->backlight_dev[i])
4038 			break;
4039 	}
4040 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4041 		i = 0;
4042 	return amdgpu_dm_backlight_get_level(dm, i);
4043 }
4044 
/* Backlight class hooks; BL_CORE_SUSPENDRESUME lets the core gate updates
 * across suspend/resume. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
4050 
/*
 * amdgpu_dm_register_backlight_device() - register a backlight class device
 * @dm: display manager; dm->num_of_edps selects the slot being registered
 *
 * Registers "amdgpu_blN" for the next eDP slot with the brightness preset
 * to maximum. Registration failure is logged but non-fatal: the caller
 * checks dm->backlight_dev[] before using the slot.
 */
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	/* Cache firmware brightness limits for this slot first. */
	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	/* Name is unique per DRM minor plus eDP index, e.g. "amdgpu_bl0". */
	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);

	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
								       adev_to_drm(dm->adev)->dev,
								       dm,
								       &amdgpu_dm_backlight_ops,
								       &props);

	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
4078 
4079 static int initialize_plane(struct amdgpu_display_manager *dm,
4080 			    struct amdgpu_mode_info *mode_info, int plane_id,
4081 			    enum drm_plane_type plane_type,
4082 			    const struct dc_plane_cap *plane_cap)
4083 {
4084 	struct drm_plane *plane;
4085 	unsigned long possible_crtcs;
4086 	int ret = 0;
4087 
4088 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4089 	if (!plane) {
4090 		DRM_ERROR("KMS: Failed to allocate plane\n");
4091 		return -ENOMEM;
4092 	}
4093 	plane->type = plane_type;
4094 
4095 	/*
4096 	 * HACK: IGT tests expect that the primary plane for a CRTC
4097 	 * can only have one possible CRTC. Only expose support for
4098 	 * any CRTC if they're not going to be used as a primary plane
4099 	 * for a CRTC - like overlay or underlay planes.
4100 	 */
4101 	possible_crtcs = 1 << plane_id;
4102 	if (plane_id >= dm->dc->caps.max_streams)
4103 		possible_crtcs = 0xff;
4104 
4105 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4106 
4107 	if (ret) {
4108 		DRM_ERROR("KMS: Failed to initialize plane\n");
4109 		kfree(plane);
4110 		return ret;
4111 	}
4112 
4113 	if (mode_info)
4114 		mode_info->planes[plane_id] = plane;
4115 
4116 	return ret;
4117 }
4118 
4119 
4120 static void register_backlight_device(struct amdgpu_display_manager *dm,
4121 				      struct dc_link *link)
4122 {
4123 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4124 	    link->type != dc_connection_none) {
4125 		/*
4126 		 * Event if registration failed, we should continue with
4127 		 * DM initialization because not having a backlight control
4128 		 * is better then a black screen.
4129 		 */
4130 		if (!dm->backlight_dev[dm->num_of_edps])
4131 			amdgpu_dm_register_backlight_device(dm);
4132 
4133 		if (dm->backlight_dev[dm->num_of_edps]) {
4134 			dm->backlight_link[dm->num_of_edps] = link;
4135 			dm->num_of_edps++;
4136 		}
4137 	}
4138 }
4139 
4140 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4141 
4142 /*
4143  * In this architecture, the association
4144  * connector -> encoder -> crtc
4145  * id not really requried. The crtc and connector will hold the
4146  * display_index as an abstraction to use with DAL component
4147  *
4148  * Returns 0 on success
4149  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Do not create overlay if MPO disabled */
		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
			break;

		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, each bound to its same-index primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* Use Outbox interrupt */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 1, 2):
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
	case IP_VERSION(3, 1, 5):
	case IP_VERSION(3, 1, 6):
	case IP_VERSION(3, 2, 0):
	case IP_VERSION(3, 2, 1):
	case IP_VERSION(2, 1, 0):
		if (register_outbox_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		/* Not fatal: older DCN/DCE simply has no outbox. */
		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
			      adev->ip_versions[DCE_HWIP][0]);
	}

	/* Determine whether to enable PSR support by default. */
	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			psr_feature_enabled = true;
			break;
		default:
			/* Older IPs only opt in via the feature mask. */
			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
			break;
		}
	}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connectors with nothing attached use emulated detection. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);
		} else {
			bool ret = false;

			mutex_lock(&dm->dc_lock);
			ret = dc_link_detect(link, DETECT_REASON_BOOT);
			mutex_unlock(&dm->dc_lock);

			if (ret) {
				amdgpu_dm_update_connector_after_detect(aconnector);
				register_backlight_device(dm, link);

				if (dm->num_of_edps)
					update_connector_ext_caps(aconnector);

				if (psr_feature_enabled)
					amdgpu_dm_set_psr_caps(link);

				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
				 * PSR is also supported.
				 */
				if (link->psr_settings.psr_feature_enabled)
					adev_to_drm(adev)->vblank_disable_immediate = false;
			}
		}
		amdgpu_set_panel_orientation(&aconnector->base);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
	default:
		/* DCN parts are keyed by IP version, not asic_type. */
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (dcn10_register_irq_handlers(dm->adev)) {
				DRM_ERROR("DM: Failed to initialize IRQ\n");
				goto fail;
			}
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
					adev->ip_versions[DCE_HWIP][0]);
			goto fail;
		}
		break;
	}

	return 0;
fail:
	/*
	 * NOTE(review): this frees only the most recently allocated
	 * connector/encoder pair (kfree(NULL) is a no-op before the loop).
	 * If amdgpu_dm_connector_init() fails after a successful
	 * amdgpu_dm_encoder_init(), freeing aencoder here presumably
	 * overlaps with DRM's later encoder teardown — verify.
	 */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
4408 
4409 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4410 {
4411 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4412 	return;
4413 }
4414 
4415 /******************************************************************************
4416  * amdgpu_display_funcs functions
4417  *****************************************************************************/
4418 
4419 /*
4420  * dm_bandwidth_update - program display watermarks
4421  *
4422  * @adev: amdgpu_device pointer
4423  *
4424  * Calculate and program the display watermarks and line buffer allocation.
4425  */
4426 static void dm_bandwidth_update(struct amdgpu_device *adev)
4427 {
4428 	/* TODO: implement later */
4429 }
4430 
/* Legacy amdgpu display hooks; most stay NULL because DC/DAL handles them. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
4444 
4445 #if defined(CONFIG_DEBUG_KERNEL_DC)
4446 
4447 static ssize_t s3_debug_store(struct device *device,
4448 			      struct device_attribute *attr,
4449 			      const char *buf,
4450 			      size_t count)
4451 {
4452 	int ret;
4453 	int s3_state;
4454 	struct drm_device *drm_dev = dev_get_drvdata(device);
4455 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4456 
4457 	ret = kstrtoint(buf, 0, &s3_state);
4458 
4459 	if (ret == 0) {
4460 		if (s3_state) {
4461 			dm_resume(adev);
4462 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4463 		} else
4464 			dm_suspend(adev);
4465 	}
4466 
4467 	return ret == 0 ? count : 0;
4468 }
4469 
4470 DEVICE_ATTR_WO(s3_debug);
4471 
4472 #endif
4473 
/*
 * dm_early_init() - early IP-block init for the display manager
 * @handle: opaque IP-block handle, actually an amdgpu_device pointer
 *
 * Fills in adev->mode_info CRTC/HPD/DIG counts for the detected ASIC (or,
 * for DCN parts, the DCE_HWIP IP version), installs the DM display and IRQ
 * function tables, and creates the optional s3_debug sysfs file.
 *
 * Returns 0 on success, -EINVAL for an unknown DCE/DCN IP version.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	default:
		/* DCN parts are keyed by IP version instead of asic_type. */
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(3, 0, 0):
			adev->mode_info.num_crtc = 6;
			adev->mode_info.num_hpd = 6;
			adev->mode_info.num_dig = 6;
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(3, 0, 2):
			adev->mode_info.num_crtc = 5;
			adev->mode_info.num_hpd = 5;
			adev->mode_info.num_dig = 5;
			break;
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(3, 0, 3):
			adev->mode_info.num_crtc = 2;
			adev->mode_info.num_hpd = 2;
			adev->mode_info.num_dig = 2;
			break;
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			adev->mode_info.num_crtc = 4;
			adev->mode_info.num_hpd = 4;
			adev->mode_info.num_dig = 4;
			break;
		default:
			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
					adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
		break;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
4607 
4608 static bool modereset_required(struct drm_crtc_state *crtc_state)
4609 {
4610 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4611 }
4612 
/* Encoder .destroy hook: detach from DRM core first, then free the memory. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
4618 
/* DRM encoder vtable: DM encoders only need teardown on destroy. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
4622 
4623 static int
4624 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4625 			    const enum surface_pixel_format format,
4626 			    enum dc_color_space *color_space)
4627 {
4628 	bool full_range;
4629 
4630 	*color_space = COLOR_SPACE_SRGB;
4631 
4632 	/* DRM color properties only affect non-RGB formats. */
4633 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4634 		return 0;
4635 
4636 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4637 
4638 	switch (plane_state->color_encoding) {
4639 	case DRM_COLOR_YCBCR_BT601:
4640 		if (full_range)
4641 			*color_space = COLOR_SPACE_YCBCR601;
4642 		else
4643 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4644 		break;
4645 
4646 	case DRM_COLOR_YCBCR_BT709:
4647 		if (full_range)
4648 			*color_space = COLOR_SPACE_YCBCR709;
4649 		else
4650 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4651 		break;
4652 
4653 	case DRM_COLOR_YCBCR_BT2020:
4654 		if (full_range)
4655 			*color_space = COLOR_SPACE_2020_YCBCR;
4656 		else
4657 			return -EINVAL;
4658 		break;
4659 
4660 	default:
4661 		return -EINVAL;
4662 	}
4663 
4664 	return 0;
4665 }
4666 
/*
 * fill_dc_plane_info_and_addr() - translate DRM plane state into DC terms
 * @adev: amdgpu device
 * @plane_state: DRM plane state to translate
 * @tiling_flags: GPU tiling metadata of the framebuffer
 * @plane_info: DC plane description to fill (fully overwritten)
 * @address: DC surface address to fill (via the buffer-attribute helper)
 * @tmz_surface: TMZ flag forwarded to the buffer-attribute helper
 * @force_disable_dcc: disable DCC regardless of framebuffer metadata
 *
 * Maps the DRM pixel format and rotation onto their DC equivalents, then
 * delegates buffer/tiling/DCC and blending attributes to helpers.
 *
 * Returns 0 on success, -EINVAL for an unsupported pixel format, or the
 * error from the buffer-attribute helper.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* DRM fourcc -> DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	case DRM_FORMAT_XRGB16161616:
	case DRM_FORMAT_ARGB16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
		break;
	case DRM_FORMAT_XBGR16161616:
	case DRM_FORMAT_ABGR16161616:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %p4cc\n",
			&fb->format->format);
		return -EINVAL;
	}

	/* DRM rotation -> DC rotation; unrecognized values fall back to 0. */
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}


	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	/* Also fills @address and the DCC/tiling/size fields. */
	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address,
					   tmz_surface, force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
4783 
4784 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4785 				    struct dc_plane_state *dc_plane_state,
4786 				    struct drm_plane_state *plane_state,
4787 				    struct drm_crtc_state *crtc_state)
4788 {
4789 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4790 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4791 	struct dc_scaling_info scaling_info;
4792 	struct dc_plane_info plane_info;
4793 	int ret;
4794 	bool force_disable_dcc = false;
4795 
4796 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
4797 	if (ret)
4798 		return ret;
4799 
4800 	dc_plane_state->src_rect = scaling_info.src_rect;
4801 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4802 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4803 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4804 
4805 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4806 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4807 					  afb->tiling_flags,
4808 					  &plane_info,
4809 					  &dc_plane_state->address,
4810 					  afb->tmz_surface,
4811 					  force_disable_dcc);
4812 	if (ret)
4813 		return ret;
4814 
4815 	dc_plane_state->format = plane_info.format;
4816 	dc_plane_state->color_space = plane_info.color_space;
4817 	dc_plane_state->format = plane_info.format;
4818 	dc_plane_state->plane_size = plane_info.plane_size;
4819 	dc_plane_state->rotation = plane_info.rotation;
4820 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4821 	dc_plane_state->stereo_format = plane_info.stereo_format;
4822 	dc_plane_state->tiling_info = plane_info.tiling_info;
4823 	dc_plane_state->visible = plane_info.visible;
4824 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4825 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
4826 	dc_plane_state->global_alpha = plane_info.global_alpha;
4827 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4828 	dc_plane_state->dcc = plane_info.dcc;
4829 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4830 	dc_plane_state->flip_int_enabled = true;
4831 
4832 	/*
4833 	 * Always set input transfer function, since plane state is refreshed
4834 	 * every time.
4835 	 */
4836 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4837 	if (ret)
4838 		return ret;
4839 
4840 	return 0;
4841 }
4842 
4843 /**
4844  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
4845  *
4846  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
4847  *         remote fb
4848  * @old_plane_state: Old state of @plane
4849  * @new_plane_state: New state of @plane
4850  * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
4852  *
4853  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
4854  * (referred to as "damage clips" in DRM nomenclature) that require updating on
4855  * the eDP remote buffer. The responsibility of specifying the dirty regions is
4856  * amdgpu_dm's.
4857  *
4858  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
4859  * plane with regions that require flushing to the eDP remote buffer. In
4860  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
4861  * implicitly provide damage clips without any client support via the plane
4862  * bounds.
4863  *
4864  * Today, amdgpu_dm only supports the MPO and cursor usecase.
4865  *
4866  * TODO: Also enable for FB_DAMAGE_CLIPS
4867  */
4868 static void fill_dc_dirty_rects(struct drm_plane *plane,
4869 				struct drm_plane_state *old_plane_state,
4870 				struct drm_plane_state *new_plane_state,
4871 				struct drm_crtc_state *crtc_state,
4872 				struct dc_flip_addrs *flip_addrs)
4873 {
4874 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4875 	struct rect *dirty_rects = flip_addrs->dirty_rects;
4876 	uint32_t num_clips;
4877 	bool bb_changed;
4878 	bool fb_changed;
4879 	uint32_t i = 0;
4880 
4881 	flip_addrs->dirty_rect_count = 0;
4882 
4883 	/*
4884 	 * Cursor plane has it's own dirty rect update interface. See
4885 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
4886 	 */
4887 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
4888 		return;
4889 
4890 	/*
4891 	 * Today, we only consider MPO use-case for PSR SU. If MPO not
4892 	 * requested, and there is a plane update, do FFU.
4893 	 */
4894 	if (!dm_crtc_state->mpo_requested) {
4895 		dirty_rects[0].x = 0;
4896 		dirty_rects[0].y = 0;
4897 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
4898 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
4899 		flip_addrs->dirty_rect_count = 1;
4900 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
4901 				 new_plane_state->plane->base.id,
4902 				 dm_crtc_state->base.mode.crtc_hdisplay,
4903 				 dm_crtc_state->base.mode.crtc_vdisplay);
4904 		return;
4905 	}
4906 
4907 	/*
4908 	 * MPO is requested. Add entire plane bounding box to dirty rects if
4909 	 * flipped to or damaged.
4910 	 *
4911 	 * If plane is moved or resized, also add old bounding box to dirty
4912 	 * rects.
4913 	 */
4914 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
4915 	fb_changed = old_plane_state->fb->base.id !=
4916 		     new_plane_state->fb->base.id;
4917 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
4918 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
4919 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
4920 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
4921 
4922 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
4923 			 new_plane_state->plane->base.id,
4924 			 bb_changed, fb_changed, num_clips);
4925 
4926 	if (num_clips || fb_changed || bb_changed) {
4927 		dirty_rects[i].x = new_plane_state->crtc_x;
4928 		dirty_rects[i].y = new_plane_state->crtc_y;
4929 		dirty_rects[i].width = new_plane_state->crtc_w;
4930 		dirty_rects[i].height = new_plane_state->crtc_h;
4931 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4932 				 new_plane_state->plane->base.id,
4933 				 dirty_rects[i].x, dirty_rects[i].y,
4934 				 dirty_rects[i].width, dirty_rects[i].height);
4935 		i += 1;
4936 	}
4937 
4938 	/* Add old plane bounding-box if plane is moved or resized */
4939 	if (bb_changed) {
4940 		dirty_rects[i].x = old_plane_state->crtc_x;
4941 		dirty_rects[i].y = old_plane_state->crtc_y;
4942 		dirty_rects[i].width = old_plane_state->crtc_w;
4943 		dirty_rects[i].height = old_plane_state->crtc_h;
4944 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
4945 				old_plane_state->plane->base.id,
4946 				dirty_rects[i].x, dirty_rects[i].y,
4947 				dirty_rects[i].width, dirty_rects[i].height);
4948 		i += 1;
4949 	}
4950 
4951 	flip_addrs->dirty_rect_count = i;
4952 }
4953 
4954 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4955 					   const struct dm_connector_state *dm_state,
4956 					   struct dc_stream_state *stream)
4957 {
4958 	enum amdgpu_rmx_type rmx_type;
4959 
4960 	struct rect src = { 0 }; /* viewport in composition space*/
4961 	struct rect dst = { 0 }; /* stream addressable area */
4962 
4963 	/* no mode. nothing to be done */
4964 	if (!mode)
4965 		return;
4966 
4967 	/* Full screen scaling by default */
4968 	src.width = mode->hdisplay;
4969 	src.height = mode->vdisplay;
4970 	dst.width = stream->timing.h_addressable;
4971 	dst.height = stream->timing.v_addressable;
4972 
4973 	if (dm_state) {
4974 		rmx_type = dm_state->scaling;
4975 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4976 			if (src.width * dst.height <
4977 					src.height * dst.width) {
4978 				/* height needs less upscaling/more downscaling */
4979 				dst.width = src.width *
4980 						dst.height / src.height;
4981 			} else {
4982 				/* width needs less upscaling/more downscaling */
4983 				dst.height = src.height *
4984 						dst.width / src.width;
4985 			}
4986 		} else if (rmx_type == RMX_CENTER) {
4987 			dst = src;
4988 		}
4989 
4990 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4991 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4992 
4993 		if (dm_state->underscan_enable) {
4994 			dst.x += dm_state->underscan_hborder / 2;
4995 			dst.y += dm_state->underscan_vborder / 2;
4996 			dst.width -= dm_state->underscan_hborder;
4997 			dst.height -= dm_state->underscan_vborder;
4998 		}
4999 	}
5000 
5001 	stream->src = src;
5002 	stream->dst = dst;
5003 
5004 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5005 		      dst.x, dst.y, dst.width, dst.height);
5006 
5007 }
5008 
5009 static enum dc_color_depth
5010 convert_color_depth_from_display_info(const struct drm_connector *connector,
5011 				      bool is_y420, int requested_bpc)
5012 {
5013 	uint8_t bpc;
5014 
5015 	if (is_y420) {
5016 		bpc = 8;
5017 
5018 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5019 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5020 			bpc = 16;
5021 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5022 			bpc = 12;
5023 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5024 			bpc = 10;
5025 	} else {
5026 		bpc = (uint8_t)connector->display_info.bpc;
5027 		/* Assume 8 bpc by default if no bpc is specified. */
5028 		bpc = bpc ? bpc : 8;
5029 	}
5030 
5031 	if (requested_bpc > 0) {
5032 		/*
5033 		 * Cap display bpc based on the user requested value.
5034 		 *
5035 		 * The value for state->max_bpc may not correctly updated
5036 		 * depending on when the connector gets added to the state
5037 		 * or if this was called outside of atomic check, so it
5038 		 * can't be used directly.
5039 		 */
5040 		bpc = min_t(u8, bpc, requested_bpc);
5041 
5042 		/* Round down to the nearest even number. */
5043 		bpc = bpc - (bpc & 1);
5044 	}
5045 
5046 	switch (bpc) {
5047 	case 0:
5048 		/*
5049 		 * Temporary Work around, DRM doesn't parse color depth for
5050 		 * EDID revision before 1.4
5051 		 * TODO: Fix edid parsing
5052 		 */
5053 		return COLOR_DEPTH_888;
5054 	case 6:
5055 		return COLOR_DEPTH_666;
5056 	case 8:
5057 		return COLOR_DEPTH_888;
5058 	case 10:
5059 		return COLOR_DEPTH_101010;
5060 	case 12:
5061 		return COLOR_DEPTH_121212;
5062 	case 14:
5063 		return COLOR_DEPTH_141414;
5064 	case 16:
5065 		return COLOR_DEPTH_161616;
5066 	default:
5067 		return COLOR_DEPTH_UNDEFINED;
5068 	}
5069 }
5070 
5071 static enum dc_aspect_ratio
5072 get_aspect_ratio(const struct drm_display_mode *mode_in)
5073 {
5074 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5075 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5076 }
5077 
5078 static enum dc_color_space
5079 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5080 {
5081 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5082 
5083 	switch (dc_crtc_timing->pixel_encoding)	{
5084 	case PIXEL_ENCODING_YCBCR422:
5085 	case PIXEL_ENCODING_YCBCR444:
5086 	case PIXEL_ENCODING_YCBCR420:
5087 	{
5088 		/*
5089 		 * 27030khz is the separation point between HDTV and SDTV
5090 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5091 		 * respectively
5092 		 */
5093 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5094 			if (dc_crtc_timing->flags.Y_ONLY)
5095 				color_space =
5096 					COLOR_SPACE_YCBCR709_LIMITED;
5097 			else
5098 				color_space = COLOR_SPACE_YCBCR709;
5099 		} else {
5100 			if (dc_crtc_timing->flags.Y_ONLY)
5101 				color_space =
5102 					COLOR_SPACE_YCBCR601_LIMITED;
5103 			else
5104 				color_space = COLOR_SPACE_YCBCR601;
5105 		}
5106 
5107 	}
5108 	break;
5109 	case PIXEL_ENCODING_RGB:
5110 		color_space = COLOR_SPACE_SRGB;
5111 		break;
5112 
5113 	default:
5114 		WARN_ON(1);
5115 		break;
5116 	}
5117 
5118 	return color_space;
5119 }
5120 
/*
 * Reduce the colour depth, if needed, until the HDMI-spec-adjusted pixel
 * clock fits within the sink's max TMDS clock.
 *
 * On success the chosen depth is written back to @timing_out and true is
 * returned. Returns false if no HDMI-valid depth fits, or if the starting
 * depth is not one of the HDMI-valid depths handled below.
 */
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjusting pix clock following on HDMI spec based on colour depth */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
		/* Step to the next shallower depth; relies on the enum's ordering. */
	} while (--depth > COLOR_DEPTH_666);
	return false;
}
5156 
/*
 * Translate a DRM display mode (plus connector/EDID information) into the
 * DC stream timing: pixel encoding, colour depth, sync polarity, VIC codes
 * and the full horizontal/vertical timing set. When @old_stream is given,
 * VIC and sync polarities are carried over from it (though the HDMI branch
 * below may still overwrite the VICs from the infoframes).
 */
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	/* Pick the pixel encoding: YCbCr 4:2:0 when mandated (or forced via
	 * debugfs), else YCbCr 4:4:4 for HDMI sinks that support it, else RGB. */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	/* Keep VIC and sync polarity stable across a stream refresh. */
	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	/* For HDMI, take the VICs from the AVI and HDMI vendor infoframes. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	/* FreeSync video modes are programmed from the base mode timings
	 * rather than the CRTC-adjusted crtc_* fields. */
	if (is_freesync_video_mode(mode_in, aconnector)) {
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	/* Default output transfer function: predefined sRGB. */
	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	/* If the depth can't be made to fit the TMDS clock, retry with 4:2:0
	 * (when the mode also supports it) and adjust the depth again. */
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}
5255 
5256 static void fill_audio_info(struct audio_info *audio_info,
5257 			    const struct drm_connector *drm_connector,
5258 			    const struct dc_sink *dc_sink)
5259 {
5260 	int i = 0;
5261 	int cea_revision = 0;
5262 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5263 
5264 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5265 	audio_info->product_id = edid_caps->product_id;
5266 
5267 	cea_revision = drm_connector->display_info.cea_rev;
5268 
5269 	strscpy(audio_info->display_name,
5270 		edid_caps->display_name,
5271 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5272 
5273 	if (cea_revision >= 3) {
5274 		audio_info->mode_count = edid_caps->audio_mode_count;
5275 
5276 		for (i = 0; i < audio_info->mode_count; ++i) {
5277 			audio_info->modes[i].format_code =
5278 					(enum audio_format_code)
5279 					(edid_caps->audio_modes[i].format_code);
5280 			audio_info->modes[i].channel_count =
5281 					edid_caps->audio_modes[i].channel_count;
5282 			audio_info->modes[i].sample_rates.all =
5283 					edid_caps->audio_modes[i].sample_rate;
5284 			audio_info->modes[i].sample_size =
5285 					edid_caps->audio_modes[i].sample_size;
5286 		}
5287 	}
5288 
5289 	audio_info->flags.all = edid_caps->speaker_flags;
5290 
5291 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5292 	if (drm_connector->latency_present[0]) {
5293 		audio_info->video_latency = drm_connector->video_latency[0];
5294 		audio_info->audio_latency = drm_connector->audio_latency[0];
5295 	}
5296 
5297 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5298 
5299 }
5300 
5301 static void
5302 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5303 				      struct drm_display_mode *dst_mode)
5304 {
5305 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5306 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5307 	dst_mode->crtc_clock = src_mode->crtc_clock;
5308 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5309 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5310 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5311 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5312 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5313 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5314 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5315 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5316 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5317 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5318 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5319 }
5320 
5321 static void
5322 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5323 					const struct drm_display_mode *native_mode,
5324 					bool scale_enabled)
5325 {
5326 	if (scale_enabled) {
5327 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5328 	} else if (native_mode->clock == drm_mode->clock &&
5329 			native_mode->htotal == drm_mode->htotal &&
5330 			native_mode->vtotal == drm_mode->vtotal) {
5331 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5332 	} else {
5333 		/* no scaling nor amdgpu inserted, no need to patch */
5334 	}
5335 }
5336 
5337 static struct dc_sink *
5338 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5339 {
5340 	struct dc_sink_init_data sink_init_data = { 0 };
5341 	struct dc_sink *sink = NULL;
5342 	sink_init_data.link = aconnector->dc_link;
5343 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5344 
5345 	sink = dc_sink_create(&sink_init_data);
5346 	if (!sink) {
5347 		DRM_ERROR("Failed to create sink!\n");
5348 		return NULL;
5349 	}
5350 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5351 
5352 	return sink;
5353 }
5354 
5355 static void set_multisync_trigger_params(
5356 		struct dc_stream_state *stream)
5357 {
5358 	struct dc_stream_state *master = NULL;
5359 
5360 	if (stream->triggered_crtc_reset.enabled) {
5361 		master = stream->triggered_crtc_reset.event_source;
5362 		stream->triggered_crtc_reset.event =
5363 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5364 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5365 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5366 	}
5367 }
5368 
5369 static void set_master_stream(struct dc_stream_state *stream_set[],
5370 			      int stream_count)
5371 {
5372 	int j, highest_rfr = 0, master_stream = 0;
5373 
5374 	for (j = 0;  j < stream_count; j++) {
5375 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5376 			int refresh_rate = 0;
5377 
5378 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5379 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5380 			if (refresh_rate > highest_rfr) {
5381 				highest_rfr = refresh_rate;
5382 				master_stream = j;
5383 			}
5384 		}
5385 	}
5386 	for (j = 0;  j < stream_count; j++) {
5387 		if (stream_set[j])
5388 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5389 	}
5390 }
5391 
5392 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5393 {
5394 	int i = 0;
5395 	struct dc_stream_state *stream;
5396 
5397 	if (context->stream_count < 2)
5398 		return;
5399 	for (i = 0; i < context->stream_count ; i++) {
5400 		if (!context->streams[i])
5401 			continue;
5402 		/*
5403 		 * TODO: add a function to read AMD VSDB bits and set
5404 		 * crtc_sync_master.multi_sync_enabled flag
5405 		 * For now it's set to false
5406 		 */
5407 	}
5408 
5409 	set_master_stream(context->streams, context->stream_count);
5410 
5411 	for (i = 0; i < context->stream_count ; i++) {
5412 		stream = context->streams[i];
5413 
5414 		if (!stream)
5415 			continue;
5416 
5417 		set_multisync_trigger_params(stream);
5418 	}
5419 }
5420 
5421 /**
5422  * DOC: FreeSync Video
5423  *
5424  * When a userspace application wants to play a video, the content follows a
5425  * standard format definition that usually specifies the FPS for that format.
5426  * The below list illustrates some video format and the expected FPS,
5427  * respectively:
5428  *
5429  * - TV/NTSC (23.976 FPS)
5430  * - Cinema (24 FPS)
5431  * - TV/PAL (25 FPS)
5432  * - TV/NTSC (29.97 FPS)
5433  * - TV/NTSC (30 FPS)
5434  * - Cinema HFR (48 FPS)
5435  * - TV/PAL (50 FPS)
5436  * - Commonly used (60 FPS)
5437  * - Multiples of 24 (48,72,96 FPS)
5438  *
5439  * The list of standards video format is not huge and can be added to the
5440  * connector modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
5442  * rate. Such a switch will happen seamlessly, without screen blanking or
5443  * reprogramming of the output in any other way. If the userspace requests a
5444  * modesetting change compatible with FreeSync modes that only differ in the
5445  * refresh rate, DC will skip the full update and avoid blink during the
5446  * transition. For example, the video player can change the modesetting from
5447  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5448  * causing any display blink. This same concept can be applied to a mode
5449  * setting change.
5450  */
5451 static struct drm_display_mode *
5452 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5453 		bool use_probed_modes)
5454 {
5455 	struct drm_display_mode *m, *m_pref = NULL;
5456 	u16 current_refresh, highest_refresh;
5457 	struct list_head *list_head = use_probed_modes ?
5458 		&aconnector->base.probed_modes :
5459 		&aconnector->base.modes;
5460 
5461 	if (aconnector->freesync_vid_base.clock != 0)
5462 		return &aconnector->freesync_vid_base;
5463 
5464 	/* Find the preferred mode */
5465 	list_for_each_entry (m, list_head, head) {
5466 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5467 			m_pref = m;
5468 			break;
5469 		}
5470 	}
5471 
5472 	if (!m_pref) {
5473 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5474 		m_pref = list_first_entry_or_null(
5475 				&aconnector->base.modes, struct drm_display_mode, head);
5476 		if (!m_pref) {
5477 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5478 			return NULL;
5479 		}
5480 	}
5481 
5482 	highest_refresh = drm_mode_vrefresh(m_pref);
5483 
5484 	/*
5485 	 * Find the mode with highest refresh rate with same resolution.
5486 	 * For some monitors, preferred mode is not the mode with highest
5487 	 * supported refresh rate.
5488 	 */
5489 	list_for_each_entry (m, list_head, head) {
5490 		current_refresh  = drm_mode_vrefresh(m);
5491 
5492 		if (m->hdisplay == m_pref->hdisplay &&
5493 		    m->vdisplay == m_pref->vdisplay &&
5494 		    highest_refresh < current_refresh) {
5495 			highest_refresh = current_refresh;
5496 			m_pref = m;
5497 		}
5498 	}
5499 
5500 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5501 	return m_pref;
5502 }
5503 
5504 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5505 		struct amdgpu_dm_connector *aconnector)
5506 {
5507 	struct drm_display_mode *high_mode;
5508 	int timing_diff;
5509 
5510 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5511 	if (!high_mode || !mode)
5512 		return false;
5513 
5514 	timing_diff = high_mode->vtotal - mode->vtotal;
5515 
5516 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5517 	    high_mode->hdisplay != mode->hdisplay ||
5518 	    high_mode->vdisplay != mode->vdisplay ||
5519 	    high_mode->hsync_start != mode->hsync_start ||
5520 	    high_mode->hsync_end != mode->hsync_end ||
5521 	    high_mode->htotal != mode->htotal ||
5522 	    high_mode->hskew != mode->hskew ||
5523 	    high_mode->vscan != mode->vscan ||
5524 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5525 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5526 		return false;
5527 	else
5528 		return true;
5529 }
5530 
5531 #if defined(CONFIG_DRM_AMD_DC_DCN)
5532 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5533 			    struct dc_sink *sink, struct dc_stream_state *stream,
5534 			    struct dsc_dec_dpcd_caps *dsc_caps)
5535 {
5536 	stream->timing.flags.DSC = 0;
5537 	dsc_caps->is_dsc_supported = false;
5538 
5539 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5540 	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
5541 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5542 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5543 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5544 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5545 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5546 				dsc_caps);
5547 	}
5548 }
5549 
5550 
/*
 * Choose a DSC configuration for an eDP stream.
 *
 * Computes the DSC bandwidth range for a fixed bpp target (8 bpp, capped
 * by the decoder's edp_max_bits_per_pixel). If even that range's maximum
 * fits under the verified link bandwidth, a fixed-bpp DSC config is
 * programmed; otherwise DSC is asked to target the link bandwidth itself.
 * On failure the stream's DSC flag is simply left unset.
 */
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
				    struct dc_sink *sink, struct dc_stream_state *stream,
				    struct dsc_dec_dpcd_caps *dsc_caps,
				    uint32_t max_dsc_target_bpp_limit_override)
{
	const struct dc_link_settings *verified_link_cap = NULL;
	uint32_t link_bw_in_kbps;
	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
	struct dc *dc = sink->ctx->dc;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config dsc_cfg = {0};

	verified_link_cap = dc_link_get_link_cap(stream->link);
	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
	/* bpp values are in 1/16th-of-a-bit units; start from 8 bpp. */
	edp_min_bpp_x16 = 8 * 16;
	edp_max_bpp_x16 = 8 * 16;

	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

	if (edp_max_bpp_x16 < edp_min_bpp_x16)
		edp_min_bpp_x16 = edp_max_bpp_x16;

	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
				dc->debug.dsc_min_slice_height_override,
				edp_min_bpp_x16, edp_max_bpp_x16,
				dsc_caps,
				&stream->timing,
				&bw_range)) {

		/* The fixed-bpp range fits the link: use it directly. */
		if (bw_range.max_kbps < link_bw_in_kbps) {
			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
					dsc_caps,
					dc->debug.dsc_min_slice_height_override,
					max_dsc_target_bpp_limit_override,
					0,
					&stream->timing,
					&dsc_cfg)) {
				stream->timing.dsc_cfg = dsc_cfg;
				stream->timing.flags.DSC = 1;
				/* Force the capped bpp computed above. */
				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
			}
			return;
		}
	}

	/* Fallback: let DSC target the available link bandwidth. */
	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
				dsc_caps,
				dc->debug.dsc_min_slice_height_override,
				max_dsc_target_bpp_limit_override,
				link_bw_in_kbps,
				&stream->timing,
				&dsc_cfg)) {
		stream->timing.dsc_cfg = dsc_cfg;
		stream->timing.flags.DSC = 1;
	}
}
5608 
5609 
/*
 * Decide whether (and how) to enable DSC on @stream:
 *  - eDP sinks: delegate to apply_dsc_policy_for_edp(), unless disabled
 *    by panel config or debugfs.
 *  - DP SST sinks: compute a DSC config against the link bandwidth.
 *  - DP-to-HDMI PCON sinks: compute a DSC config only when the timing
 *    bandwidth exceeds the link bandwidth.
 * Debugfs overrides (forced enable, slice counts, bits per pixel) are
 * applied last and take precedence.
 */
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	uint32_t link_bandwidth_kbps;
	uint32_t max_dsc_target_bpp_limit_override = 0;
	struct dc *dc = sink->ctx->dc;
	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
	uint32_t dsc_max_supported_bw_in_kbps;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							dc_link_get_link_cap(aconnector->dc_link));
	/* Honor any per-panel bpp limit from the EDID patch table. */
	if (stream->link && stream->link->local_sink)
		max_dsc_target_bpp_limit_override =
			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
	    !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						max_dsc_target_bpp_limit_override,
						link_bandwidth_kbps,
						&stream->timing,
						&stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			/* Only compress when the uncompressed timing exceeds the link. */
			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
					max_supported_bw_in_kbps > 0 &&
					dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						max_dsc_target_bpp_limit_override,
						dsc_max_supported_bw_in_kbps,
						&stream->timing,
						&stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
									 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
5684 #endif /* CONFIG_DRM_AMD_DC_DCN */
5685 
/*
 * create_stream_for_sink() - build a dc_stream_state for a connector's sink.
 * @aconnector: connector to create the stream for; must be non-NULL.
 * @drm_mode: requested display mode.
 * @dm_state: DM connector state; may be NULL (e.g. during mode validation).
 * @old_stream: previous stream, reused for timing properties when scaling is
 *              active and the refresh rate is unchanged.
 * @requested_bpc: maximum bits per component to request from the sink.
 *
 * Returns a new stream on success (release with dc_stream_release()) or
 * NULL on failure.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif

	struct dc_sink *sink = NULL;

	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	/* Without a real sink (e.g. headless), fall back to a fake one. */
	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	/* Find the sink's preferred mode; first mode is the fallback. */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		/*
		 * For freesync video modes, drive the panel with the highest
		 * refresh rate timing and adjust the front porch at runtime.
		 */
		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			drm_mode_copy(&saved_mode, &mode);
			drm_mode_copy(&mode, freesync_mode);
		} else {
			decide_crtc_timing_for_drm_display_mode(
					&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);
	else if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change, copy the
	 * vic and polarities from the old timings via @old_stream.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
#endif

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// should decide stream support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
finish:
	dc_sink_release(sink);

	return stream;
}
5838 
5839 static enum drm_connector_status
5840 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5841 {
5842 	bool connected;
5843 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5844 
5845 	/*
5846 	 * Notes:
5847 	 * 1. This interface is NOT called in context of HPD irq.
5848 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5849 	 * makes it a bad place for *any* MST-related activity.
5850 	 */
5851 
5852 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5853 	    !aconnector->fake_enable)
5854 		connected = (aconnector->dc_sink != NULL);
5855 	else
5856 		connected = (aconnector->base.force == DRM_FORCE_ON ||
5857 				aconnector->base.force == DRM_FORCE_ON_DIGITAL);
5858 
5859 	update_subconnector_property(aconnector);
5860 
5861 	return (connected ? connector_status_connected :
5862 			connector_status_disconnected);
5863 }
5864 
5865 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5866 					    struct drm_connector_state *connector_state,
5867 					    struct drm_property *property,
5868 					    uint64_t val)
5869 {
5870 	struct drm_device *dev = connector->dev;
5871 	struct amdgpu_device *adev = drm_to_adev(dev);
5872 	struct dm_connector_state *dm_old_state =
5873 		to_dm_connector_state(connector->state);
5874 	struct dm_connector_state *dm_new_state =
5875 		to_dm_connector_state(connector_state);
5876 
5877 	int ret = -EINVAL;
5878 
5879 	if (property == dev->mode_config.scaling_mode_property) {
5880 		enum amdgpu_rmx_type rmx_type;
5881 
5882 		switch (val) {
5883 		case DRM_MODE_SCALE_CENTER:
5884 			rmx_type = RMX_CENTER;
5885 			break;
5886 		case DRM_MODE_SCALE_ASPECT:
5887 			rmx_type = RMX_ASPECT;
5888 			break;
5889 		case DRM_MODE_SCALE_FULLSCREEN:
5890 			rmx_type = RMX_FULL;
5891 			break;
5892 		case DRM_MODE_SCALE_NONE:
5893 		default:
5894 			rmx_type = RMX_OFF;
5895 			break;
5896 		}
5897 
5898 		if (dm_old_state->scaling == rmx_type)
5899 			return 0;
5900 
5901 		dm_new_state->scaling = rmx_type;
5902 		ret = 0;
5903 	} else if (property == adev->mode_info.underscan_hborder_property) {
5904 		dm_new_state->underscan_hborder = val;
5905 		ret = 0;
5906 	} else if (property == adev->mode_info.underscan_vborder_property) {
5907 		dm_new_state->underscan_vborder = val;
5908 		ret = 0;
5909 	} else if (property == adev->mode_info.underscan_property) {
5910 		dm_new_state->underscan_enable = val;
5911 		ret = 0;
5912 	} else if (property == adev->mode_info.abm_level_property) {
5913 		dm_new_state->abm_level = val;
5914 		ret = 0;
5915 	}
5916 
5917 	return ret;
5918 }
5919 
5920 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5921 					    const struct drm_connector_state *state,
5922 					    struct drm_property *property,
5923 					    uint64_t *val)
5924 {
5925 	struct drm_device *dev = connector->dev;
5926 	struct amdgpu_device *adev = drm_to_adev(dev);
5927 	struct dm_connector_state *dm_state =
5928 		to_dm_connector_state(state);
5929 	int ret = -EINVAL;
5930 
5931 	if (property == dev->mode_config.scaling_mode_property) {
5932 		switch (dm_state->scaling) {
5933 		case RMX_CENTER:
5934 			*val = DRM_MODE_SCALE_CENTER;
5935 			break;
5936 		case RMX_ASPECT:
5937 			*val = DRM_MODE_SCALE_ASPECT;
5938 			break;
5939 		case RMX_FULL:
5940 			*val = DRM_MODE_SCALE_FULLSCREEN;
5941 			break;
5942 		case RMX_OFF:
5943 		default:
5944 			*val = DRM_MODE_SCALE_NONE;
5945 			break;
5946 		}
5947 		ret = 0;
5948 	} else if (property == adev->mode_info.underscan_hborder_property) {
5949 		*val = dm_state->underscan_hborder;
5950 		ret = 0;
5951 	} else if (property == adev->mode_info.underscan_vborder_property) {
5952 		*val = dm_state->underscan_vborder;
5953 		ret = 0;
5954 	} else if (property == adev->mode_info.underscan_property) {
5955 		*val = dm_state->underscan_enable;
5956 		ret = 0;
5957 	} else if (property == adev->mode_info.abm_level_property) {
5958 		*val = dm_state->abm_level;
5959 		ret = 0;
5960 	}
5961 
5962 	return ret;
5963 }
5964 
5965 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5966 {
5967 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5968 
5969 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5970 }
5971 
5972 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5973 {
5974 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5975 	const struct dc_link *link = aconnector->dc_link;
5976 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5977 	struct amdgpu_display_manager *dm = &adev->dm;
5978 	int i;
5979 
5980 	/*
5981 	 * Call only if mst_mgr was initialized before since it's not done
5982 	 * for all connector types.
5983 	 */
5984 	if (aconnector->mst_mgr.dev)
5985 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5986 
5987 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5988 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5989 	for (i = 0; i < dm->num_of_edps; i++) {
5990 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
5991 			backlight_device_unregister(dm->backlight_dev[i]);
5992 			dm->backlight_dev[i] = NULL;
5993 		}
5994 	}
5995 #endif
5996 
5997 	if (aconnector->dc_em_sink)
5998 		dc_sink_release(aconnector->dc_em_sink);
5999 	aconnector->dc_em_sink = NULL;
6000 	if (aconnector->dc_sink)
6001 		dc_sink_release(aconnector->dc_sink);
6002 	aconnector->dc_sink = NULL;
6003 
6004 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6005 	drm_connector_unregister(connector);
6006 	drm_connector_cleanup(connector);
6007 	if (aconnector->i2c) {
6008 		i2c_del_adapter(&aconnector->i2c->base);
6009 		kfree(aconnector->i2c);
6010 	}
6011 	kfree(aconnector->dm_dp_aux.aux.name);
6012 
6013 	kfree(connector);
6014 }
6015 
6016 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6017 {
6018 	struct dm_connector_state *state =
6019 		to_dm_connector_state(connector->state);
6020 
6021 	if (connector->state)
6022 		__drm_atomic_helper_connector_destroy_state(connector->state);
6023 
6024 	kfree(state);
6025 
6026 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6027 
6028 	if (state) {
6029 		state->scaling = RMX_OFF;
6030 		state->underscan_enable = false;
6031 		state->underscan_hborder = 0;
6032 		state->underscan_vborder = 0;
6033 		state->base.max_requested_bpc = 8;
6034 		state->vcpi_slots = 0;
6035 		state->pbn = 0;
6036 
6037 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6038 			state->abm_level = amdgpu_dm_abm_level;
6039 
6040 		__drm_atomic_helper_connector_reset(connector, &state->base);
6041 	}
6042 }
6043 
6044 struct drm_connector_state *
6045 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6046 {
6047 	struct dm_connector_state *state =
6048 		to_dm_connector_state(connector->state);
6049 
6050 	struct dm_connector_state *new_state =
6051 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6052 
6053 	if (!new_state)
6054 		return NULL;
6055 
6056 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6057 
6058 	new_state->freesync_capable = state->freesync_capable;
6059 	new_state->abm_level = state->abm_level;
6060 	new_state->scaling = state->scaling;
6061 	new_state->underscan_enable = state->underscan_enable;
6062 	new_state->underscan_hborder = state->underscan_hborder;
6063 	new_state->underscan_vborder = state->underscan_vborder;
6064 	new_state->vcpi_slots = state->vcpi_slots;
6065 	new_state->pbn = state->pbn;
6066 	return &new_state->base;
6067 }
6068 
6069 static int
6070 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6071 {
6072 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6073 		to_amdgpu_dm_connector(connector);
6074 	int r;
6075 
6076 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6077 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6078 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6079 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6080 		if (r)
6081 			return r;
6082 	}
6083 
6084 #if defined(CONFIG_DEBUG_FS)
6085 	connector_debugfs_init(amdgpu_dm_connector);
6086 #endif
6087 
6088 	return 0;
6089 }
6090 
/* Connector ops shared by all amdgpu_dm connector types. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
6103 
/* .get_modes helper callback; see the note on the helper funcs table. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
6108 
6109 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6110 {
6111 	struct dc_sink_init_data init_params = {
6112 			.link = aconnector->dc_link,
6113 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6114 	};
6115 	struct edid *edid;
6116 
6117 	if (!aconnector->base.edid_blob_ptr) {
6118 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6119 				aconnector->base.name);
6120 
6121 		aconnector->base.force = DRM_FORCE_OFF;
6122 		aconnector->base.override_edid = false;
6123 		return;
6124 	}
6125 
6126 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6127 
6128 	aconnector->edid = edid;
6129 
6130 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6131 		aconnector->dc_link,
6132 		(uint8_t *)edid,
6133 		(edid->extensions + 1) * EDID_LENGTH,
6134 		&init_params);
6135 
6136 	if (aconnector->base.force == DRM_FORCE_ON) {
6137 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6138 		aconnector->dc_link->local_sink :
6139 		aconnector->dc_em_sink;
6140 		dc_sink_retain(aconnector->dc_sink);
6141 	}
6142 }
6143 
6144 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6145 {
6146 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6147 
6148 	/*
6149 	 * In case of headless boot with force on for DP managed connector
6150 	 * Those settings have to be != 0 to get initial modeset
6151 	 */
6152 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6153 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6154 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6155 	}
6156 
6157 
6158 	aconnector->base.override_edid = true;
6159 	create_eml_sink(aconnector);
6160 }
6161 
/*
 * create_validate_stream_for_sink() - create a stream and validate it with DC,
 * lowering bpc (down to 6) and finally forcing YCbCr420 until validation
 * passes.
 *
 * Returns a validated stream (release with dc_stream_release()) or NULL.
 */
struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		/* MST streams need an extra per-port bandwidth check. */
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	/*
	 * Encoder validation failure: retry exactly once with YUV420 forced.
	 * The flag is restored afterwards so the retry cannot recurse again.
	 */
	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}
6214 
6215 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6216 				   struct drm_display_mode *mode)
6217 {
6218 	int result = MODE_ERROR;
6219 	struct dc_sink *dc_sink;
6220 	/* TODO: Unhardcode stream count */
6221 	struct dc_stream_state *stream;
6222 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6223 
6224 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6225 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6226 		return result;
6227 
6228 	/*
6229 	 * Only run this the first time mode_valid is called to initilialize
6230 	 * EDID mgmt
6231 	 */
6232 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6233 		!aconnector->dc_em_sink)
6234 		handle_edid_mgmt(aconnector);
6235 
6236 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6237 
6238 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6239 				aconnector->base.force != DRM_FORCE_ON) {
6240 		DRM_ERROR("dc_sink is NULL!\n");
6241 		goto fail;
6242 	}
6243 
6244 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6245 	if (stream) {
6246 		dc_stream_release(stream);
6247 		result = MODE_OK;
6248 	}
6249 
6250 fail:
6251 	/* TODO: error handling*/
6252 	return result;
6253 }
6254 
/*
 * fill_hdr_info_packet() - pack the connector's HDR static metadata into a
 * DC info packet (HDMI DRM infoframe or DP HDR SDP, by connector type).
 *
 * Returns 0 on success (also when no metadata is attached, leaving @out
 * invalid/zeroed), or a negative errno on packing failure.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	/* No HDR metadata attached: leave the packet marked invalid. */
	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte metadata payload after the per-type prefix. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
6313 
6314 static int
6315 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6316 				 struct drm_atomic_state *state)
6317 {
6318 	struct drm_connector_state *new_con_state =
6319 		drm_atomic_get_new_connector_state(state, conn);
6320 	struct drm_connector_state *old_con_state =
6321 		drm_atomic_get_old_connector_state(state, conn);
6322 	struct drm_crtc *crtc = new_con_state->crtc;
6323 	struct drm_crtc_state *new_crtc_state;
6324 	int ret;
6325 
6326 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6327 
6328 	if (!crtc)
6329 		return 0;
6330 
6331 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6332 		struct dc_info_packet hdr_infopacket;
6333 
6334 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6335 		if (ret)
6336 			return ret;
6337 
6338 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6339 		if (IS_ERR(new_crtc_state))
6340 			return PTR_ERR(new_crtc_state);
6341 
6342 		/*
6343 		 * DC considers the stream backends changed if the
6344 		 * static metadata changes. Forcing the modeset also
6345 		 * gives a simple way for userspace to switch from
6346 		 * 8bpc to 10bpc when setting the metadata to enter
6347 		 * or exit HDR.
6348 		 *
6349 		 * Changing the static metadata after it's been
6350 		 * set is permissible, however. So only force a
6351 		 * modeset if we're entering or exiting HDR.
6352 		 */
6353 		new_crtc_state->mode_changed =
6354 			!old_con_state->hdr_output_metadata ||
6355 			!new_con_state->hdr_output_metadata;
6356 	}
6357 
6358 	return 0;
6359 }
6360 
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size()
	 * and be missing after the user starts lightdm. So the modes list must
	 * be renewed in the get_modes callback, not just return the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
6373 
/* Intentional no-op: encoder disable is handled through the DC stream path. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6378 
6379 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6380 {
6381 	switch (display_color_depth) {
6382 	case COLOR_DEPTH_666:
6383 		return 6;
6384 	case COLOR_DEPTH_888:
6385 		return 8;
6386 	case COLOR_DEPTH_101010:
6387 		return 10;
6388 	case COLOR_DEPTH_121212:
6389 		return 12;
6390 	case COLOR_DEPTH_141414:
6391 		return 14;
6392 	case COLOR_DEPTH_161616:
6393 		return 16;
6394 	default:
6395 		break;
6396 	}
6397 	return 0;
6398 }
6399 
/*
 * dm_encoder_helper_atomic_check() - compute PBN and allocate VCPI slots for
 * an MST connector during atomic check. Non-MST connectors return 0 early.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	/* Only MST connectors (with a port and a sink) need VCPI handling. */
	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	/*
	 * Recompute PBN only for non-duplicated states; duplicated states
	 * keep the PBN they were copied with.
	 */
	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;
		/*
		 * NOTE(review): is_y420 is only true when the mode supports
		 * 4:2:0 AND the connector is already forced to YUV420 output
		 * — confirm this conjunction (vs. a disjunction) is intended.
		 */
		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}
6446 
/* Encoder helper ops; atomic_check handles MST VCPI slot allocation. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
6451 
6452 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * dm_update_mst_vcpi_slots_for_dsc() - re-derive PBN/VCPI slot allocations
 * for every MST connector in @state from the DSC fairness results in @vars.
 *
 * Returns 0 on success or a negative error from
 * drm_dp_mst_atomic_enable_dsc().
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dsc_mst_fairness_vars *vars)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j;
	int vcpi, pbn_div, pbn, slot_num = 0;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		/* Only MST connectors carry a port. */
		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		/* Find the DC stream backing this connector, if any. */
		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
		for (j = 0; j < dc_state->stream_count; j++) {
			if (vars[j].aconnector == aconnector) {
				pbn = vars[j].pbn;
				break;
			}
		}

		/* No fairness entry for this connector: pbn stays unset, skip. */
		if (j == dc_state->stream_count)
			continue;

		slot_num = DIV_ROUND_UP(pbn, pbn_div);

		/* Non-DSC stream: record the allocation and disable DSC. */
		if (stream->timing.flags.DSC != 1) {
			dm_conn_state->pbn = pbn;
			dm_conn_state->vcpi_slots = slot_num;

			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		/* DSC stream: enable DSC on the port and take its VCPI count. */
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
6529 #endif
6530 
6531 static int to_drm_connector_type(enum signal_type st)
6532 {
6533 	switch (st) {
6534 	case SIGNAL_TYPE_HDMI_TYPE_A:
6535 		return DRM_MODE_CONNECTOR_HDMIA;
6536 	case SIGNAL_TYPE_EDP:
6537 		return DRM_MODE_CONNECTOR_eDP;
6538 	case SIGNAL_TYPE_LVDS:
6539 		return DRM_MODE_CONNECTOR_LVDS;
6540 	case SIGNAL_TYPE_RGB:
6541 		return DRM_MODE_CONNECTOR_VGA;
6542 	case SIGNAL_TYPE_DISPLAY_PORT:
6543 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6544 		return DRM_MODE_CONNECTOR_DisplayPort;
6545 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6546 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6547 		return DRM_MODE_CONNECTOR_DVID;
6548 	case SIGNAL_TYPE_VIRTUAL:
6549 		return DRM_MODE_CONNECTOR_VIRTUAL;
6550 
6551 	default:
6552 		return DRM_MODE_CONNECTOR_Unknown;
6553 	}
6554 }
6555 
/* Return the connector's encoder; DM wires exactly one encoder per connector,
 * so the first possible encoder is the one. NULL if none is attached. */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}
6566 
/*
 * Record the encoder's native mode from the connector's probed mode list.
 * Leaves native_mode.clock at 0 when no suitable mode is found.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		/*
		 * NOTE(review): the break below is unconditional, so only the
		 * FIRST probed mode is ever inspected for the PREFERRED flag.
		 * Presumably probed_modes lists the preferred mode first —
		 * confirm, or move the break inside the if.
		 */
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}
6595 
6596 static struct drm_display_mode *
6597 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6598 			     char *name,
6599 			     int hdisplay, int vdisplay)
6600 {
6601 	struct drm_device *dev = encoder->dev;
6602 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6603 	struct drm_display_mode *mode = NULL;
6604 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6605 
6606 	mode = drm_mode_duplicate(dev, native_mode);
6607 
6608 	if (mode == NULL)
6609 		return NULL;
6610 
6611 	mode->hdisplay = hdisplay;
6612 	mode->vdisplay = vdisplay;
6613 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6614 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6615 
6616 	return mode;
6617 
6618 }
6619 
6620 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6621 						 struct drm_connector *connector)
6622 {
6623 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6624 	struct drm_display_mode *mode = NULL;
6625 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6626 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6627 				to_amdgpu_dm_connector(connector);
6628 	int i;
6629 	int n;
6630 	struct mode_size {
6631 		char name[DRM_DISPLAY_MODE_LEN];
6632 		int w;
6633 		int h;
6634 	} common_modes[] = {
6635 		{  "640x480",  640,  480},
6636 		{  "800x600",  800,  600},
6637 		{ "1024x768", 1024,  768},
6638 		{ "1280x720", 1280,  720},
6639 		{ "1280x800", 1280,  800},
6640 		{"1280x1024", 1280, 1024},
6641 		{ "1440x900", 1440,  900},
6642 		{"1680x1050", 1680, 1050},
6643 		{"1600x1200", 1600, 1200},
6644 		{"1920x1080", 1920, 1080},
6645 		{"1920x1200", 1920, 1200}
6646 	};
6647 
6648 	n = ARRAY_SIZE(common_modes);
6649 
6650 	for (i = 0; i < n; i++) {
6651 		struct drm_display_mode *curmode = NULL;
6652 		bool mode_existed = false;
6653 
6654 		if (common_modes[i].w > native_mode->hdisplay ||
6655 		    common_modes[i].h > native_mode->vdisplay ||
6656 		   (common_modes[i].w == native_mode->hdisplay &&
6657 		    common_modes[i].h == native_mode->vdisplay))
6658 			continue;
6659 
6660 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6661 			if (common_modes[i].w == curmode->hdisplay &&
6662 			    common_modes[i].h == curmode->vdisplay) {
6663 				mode_existed = true;
6664 				break;
6665 			}
6666 		}
6667 
6668 		if (mode_existed)
6669 			continue;
6670 
6671 		mode = amdgpu_dm_create_common_mode(encoder,
6672 				common_modes[i].name, common_modes[i].w,
6673 				common_modes[i].h);
6674 		if (!mode)
6675 			continue;
6676 
6677 		drm_mode_probed_add(connector, mode);
6678 		amdgpu_dm_connector->num_modes++;
6679 	}
6680 }
6681 
6682 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
6683 {
6684 	struct drm_encoder *encoder;
6685 	struct amdgpu_encoder *amdgpu_encoder;
6686 	const struct drm_display_mode *native_mode;
6687 
6688 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
6689 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
6690 		return;
6691 
6692 	mutex_lock(&connector->dev->mode_config.mutex);
6693 	amdgpu_dm_connector_get_modes(connector);
6694 	mutex_unlock(&connector->dev->mode_config.mutex);
6695 
6696 	encoder = amdgpu_dm_connector_to_encoder(connector);
6697 	if (!encoder)
6698 		return;
6699 
6700 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6701 
6702 	native_mode = &amdgpu_encoder->native_mode;
6703 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
6704 		return;
6705 
6706 	drm_connector_set_panel_orientation_with_quirk(connector,
6707 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
6708 						       native_mode->hdisplay,
6709 						       native_mode->vdisplay);
6710 }
6711 
6712 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6713 					      struct edid *edid)
6714 {
6715 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6716 			to_amdgpu_dm_connector(connector);
6717 
6718 	if (edid) {
6719 		/* empty probed_modes */
6720 		INIT_LIST_HEAD(&connector->probed_modes);
6721 		amdgpu_dm_connector->num_modes =
6722 				drm_add_edid_modes(connector, edid);
6723 
6724 		/* sorting the probed modes before calling function
6725 		 * amdgpu_dm_get_native_mode() since EDID can have
6726 		 * more than one preferred mode. The modes that are
6727 		 * later in the probed mode list could be of higher
6728 		 * and preferred resolution. For example, 3840x2160
6729 		 * resolution in base EDID preferred timing and 4096x2160
6730 		 * preferred resolution in DID extension block later.
6731 		 */
6732 		drm_mode_sort(&connector->probed_modes);
6733 		amdgpu_dm_get_native_mode(connector);
6734 
6735 		/* Freesync capabilities are reset by calling
6736 		 * drm_add_edid_modes() and need to be
6737 		 * restored here.
6738 		 */
6739 		amdgpu_dm_update_freesync_caps(connector, edid);
6740 	} else {
6741 		amdgpu_dm_connector->num_modes = 0;
6742 	}
6743 }
6744 
6745 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
6746 			      struct drm_display_mode *mode)
6747 {
6748 	struct drm_display_mode *m;
6749 
6750 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
6751 		if (drm_mode_equal(m, mode))
6752 			return true;
6753 	}
6754 
6755 	return false;
6756 }
6757 
/*
 * Synthesize fixed-refresh "freesync video" modes by retiming the
 * highest-refresh probed mode down to a set of standard frame rates via
 * vertical-total stretching, adding each new mode to the connector's
 * probed list.  Returns the number of modes added.
 */
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24 	        - Cinema
	 * 25 	        - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30 	        - TV/NTSC
	 * 48 	        - Cinema HFR
	 * 50 	        - TV/PAL
	 * 60 	        - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		/* Can only retime downward: skip rates above the base mode's. */
		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		/* Target rate must fall inside the panel's VRR window. */
		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		/* Solve for the vtotal that yields the target rate at the
		 * base mode's pixel clock and htotal; the diff is the
		 * amount of vertical-blank stretch needed.
		 */
		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		/* Shift the vertical timings by the stretch amount. */
		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
 out:
	return new_modes_count;
}
6833 
6834 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
6835 						   struct edid *edid)
6836 {
6837 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6838 		to_amdgpu_dm_connector(connector);
6839 
6840 	if (!edid)
6841 		return;
6842 
6843 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
6844 		amdgpu_dm_connector->num_modes +=
6845 			add_fs_modes(amdgpu_dm_connector);
6846 }
6847 
6848 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6849 {
6850 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6851 			to_amdgpu_dm_connector(connector);
6852 	struct drm_encoder *encoder;
6853 	struct edid *edid = amdgpu_dm_connector->edid;
6854 
6855 	encoder = amdgpu_dm_connector_to_encoder(connector);
6856 
6857 	if (!drm_edid_is_valid(edid)) {
6858 		amdgpu_dm_connector->num_modes =
6859 				drm_add_modes_noedid(connector, 640, 480);
6860 	} else {
6861 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6862 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6863 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
6864 	}
6865 	amdgpu_dm_fbc_init(connector);
6866 
6867 	return amdgpu_dm_connector->num_modes;
6868 }
6869 
/*
 * amdgpu_dm_connector_init_helper - common connector setup shared by
 * connector creation paths.
 *
 * Resets the connector state, records the dc_link, configures HPD
 * polling per connector type, and attaches the DM-managed DRM
 * properties: scaling, underscan, max bpc, ABM (eDP only), HDR output
 * metadata, VRR-capable, and (optionally) HDCP content protection.
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		/* DP links resolve their assigned link encoder here before
		 * its capabilities can be queried.
		 */
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST connectors inherit max bpc from their root port. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM needs either an initialized DMCU or a DMUB service. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
6961 
6962 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6963 			      struct i2c_msg *msgs, int num)
6964 {
6965 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6966 	struct ddc_service *ddc_service = i2c->ddc_service;
6967 	struct i2c_command cmd;
6968 	int i;
6969 	int result = -EIO;
6970 
6971 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6972 
6973 	if (!cmd.payloads)
6974 		return result;
6975 
6976 	cmd.number_of_payloads = num;
6977 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6978 	cmd.speed = 100;
6979 
6980 	for (i = 0; i < num; i++) {
6981 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6982 		cmd.payloads[i].address = msgs[i].addr;
6983 		cmd.payloads[i].length = msgs[i].len;
6984 		cmd.payloads[i].data = msgs[i].buf;
6985 	}
6986 
6987 	if (dc_submit_i2c(
6988 			ddc_service->ctx->dc,
6989 			ddc_service->link->link_index,
6990 			&cmd))
6991 		result = num;
6992 
6993 	kfree(cmd.payloads);
6994 	return result;
6995 }
6996 
6997 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6998 {
6999 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7000 }
7001 
/* i2c algorithm whose transfers are routed through DC (dc_submit_i2c). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
7006 
7007 static struct amdgpu_i2c_adapter *
7008 create_i2c(struct ddc_service *ddc_service,
7009 	   int link_index,
7010 	   int *res)
7011 {
7012 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7013 	struct amdgpu_i2c_adapter *i2c;
7014 
7015 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7016 	if (!i2c)
7017 		return NULL;
7018 	i2c->base.owner = THIS_MODULE;
7019 	i2c->base.class = I2C_CLASS_DDC;
7020 	i2c->base.dev.parent = &adev->pdev->dev;
7021 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7022 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7023 	i2c_set_adapdata(&i2c->base, i2c);
7024 	i2c->ddc_service = ddc_service;
7025 
7026 	return i2c;
7027 }
7028 
7029 
7030 /*
7031  * Note: this function assumes that dc_link_detect() was called for the
7032  * dc_link which will be represented by this aconnector.
7033  */
7034 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7035 				    struct amdgpu_dm_connector *aconnector,
7036 				    uint32_t link_index,
7037 				    struct amdgpu_encoder *aencoder)
7038 {
7039 	int res = 0;
7040 	int connector_type;
7041 	struct dc *dc = dm->dc;
7042 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7043 	struct amdgpu_i2c_adapter *i2c;
7044 
7045 	link->priv = aconnector;
7046 
7047 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7048 
7049 	i2c = create_i2c(link->ddc, link->link_index, &res);
7050 	if (!i2c) {
7051 		DRM_ERROR("Failed to create i2c adapter data\n");
7052 		return -ENOMEM;
7053 	}
7054 
7055 	aconnector->i2c = i2c;
7056 	res = i2c_add_adapter(&i2c->base);
7057 
7058 	if (res) {
7059 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7060 		goto out_free;
7061 	}
7062 
7063 	connector_type = to_drm_connector_type(link->connector_signal);
7064 
7065 	res = drm_connector_init_with_ddc(
7066 			dm->ddev,
7067 			&aconnector->base,
7068 			&amdgpu_dm_connector_funcs,
7069 			connector_type,
7070 			&i2c->base);
7071 
7072 	if (res) {
7073 		DRM_ERROR("connector_init failed\n");
7074 		aconnector->connector_id = -1;
7075 		goto out_free;
7076 	}
7077 
7078 	drm_connector_helper_add(
7079 			&aconnector->base,
7080 			&amdgpu_dm_connector_helper_funcs);
7081 
7082 	amdgpu_dm_connector_init_helper(
7083 		dm,
7084 		aconnector,
7085 		connector_type,
7086 		link,
7087 		link_index);
7088 
7089 	drm_connector_attach_encoder(
7090 		&aconnector->base, &aencoder->base);
7091 
7092 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7093 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7094 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7095 
7096 out_free:
7097 	if (res) {
7098 		kfree(i2c);
7099 		aconnector->i2c = NULL;
7100 	}
7101 	return res;
7102 }
7103 
7104 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7105 {
7106 	switch (adev->mode_info.num_crtc) {
7107 	case 1:
7108 		return 0x1;
7109 	case 2:
7110 		return 0x3;
7111 	case 3:
7112 		return 0x7;
7113 	case 4:
7114 		return 0xf;
7115 	case 5:
7116 		return 0x1f;
7117 	case 6:
7118 	default:
7119 		return 0x3f;
7120 	}
7121 }
7122 
7123 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7124 				  struct amdgpu_encoder *aencoder,
7125 				  uint32_t link_index)
7126 {
7127 	struct amdgpu_device *adev = drm_to_adev(dev);
7128 
7129 	int res = drm_encoder_init(dev,
7130 				   &aencoder->base,
7131 				   &amdgpu_dm_encoder_funcs,
7132 				   DRM_MODE_ENCODER_TMDS,
7133 				   NULL);
7134 
7135 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7136 
7137 	if (!res)
7138 		aencoder->encoder_id = link_index;
7139 	else
7140 		aencoder->encoder_id = -1;
7141 
7142 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7143 
7144 	return res;
7145 }
7146 
7147 static void manage_dm_interrupts(struct amdgpu_device *adev,
7148 				 struct amdgpu_crtc *acrtc,
7149 				 bool enable)
7150 {
7151 	/*
7152 	 * We have no guarantee that the frontend index maps to the same
7153 	 * backend index - some even map to more than one.
7154 	 *
7155 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7156 	 */
7157 	int irq_type =
7158 		amdgpu_display_crtc_idx_to_irq_type(
7159 			adev,
7160 			acrtc->crtc_id);
7161 
7162 	if (enable) {
7163 		drm_crtc_vblank_on(&acrtc->base);
7164 		amdgpu_irq_get(
7165 			adev,
7166 			&adev->pageflip_irq,
7167 			irq_type);
7168 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7169 		amdgpu_irq_get(
7170 			adev,
7171 			&adev->vline0_irq,
7172 			irq_type);
7173 #endif
7174 	} else {
7175 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7176 		amdgpu_irq_put(
7177 			adev,
7178 			&adev->vline0_irq,
7179 			irq_type);
7180 #endif
7181 		amdgpu_irq_put(
7182 			adev,
7183 			&adev->pageflip_irq,
7184 			irq_type);
7185 		drm_crtc_vblank_off(&acrtc->base);
7186 	}
7187 }
7188 
/* Force re-apply the current pageflip IRQ enable state to hardware. */
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
7201 
7202 static bool
7203 is_scaling_state_different(const struct dm_connector_state *dm_state,
7204 			   const struct dm_connector_state *old_dm_state)
7205 {
7206 	if (dm_state->scaling != old_dm_state->scaling)
7207 		return true;
7208 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7209 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7210 			return true;
7211 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7212 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7213 			return true;
7214 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7215 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7216 		return true;
7217 	return false;
7218 }
7219 
7220 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether HDCP needs to be (re)enabled for this connector based
 * on the old -> new content-protection state transition.  Returns true
 * when the caller should kick off HDCP work.  Note that this function
 * may also rewrite state->content_protection to normalize transitional
 * values (see the individual cases below).
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
		state->crtc && state->crtc->enabled &&
		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless s3, dpms
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 */
	return false;
}
7303 
7304 #endif
7305 static void remove_stream(struct amdgpu_device *adev,
7306 			  struct amdgpu_crtc *acrtc,
7307 			  struct dc_stream_state *stream)
7308 {
7309 	/* this is the update mode case */
7310 
7311 	acrtc->otg_inst = -1;
7312 	acrtc->enabled = false;
7313 }
7314 
7315 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7316 {
7317 
7318 	assert_spin_locked(&acrtc->base.dev->event_lock);
7319 	WARN_ON(acrtc->event);
7320 
7321 	acrtc->event = acrtc->base.state->event;
7322 
7323 	/* Set the flip status */
7324 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7325 
7326 	/* Mark this event as consumed */
7327 	acrtc->base.state->event = NULL;
7328 
7329 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7330 		     acrtc->crtc_id);
7331 }
7332 
/*
 * Recompute VRR parameters and the VRR infopacket for @new_stream
 * around a flip, and mirror the results into the new CRTC state, the
 * stream, and the per-CRTC IRQ parameters (acrtc->dm_irq_params).
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* dm_irq_params is shared with the interrupt handlers; all access
	 * below happens under event_lock.
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
        vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* On pre-AI families with VRR active, vmin/vmax are
		 * adjusted from here rather than from the vupdate IRQ.
		 */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	/* Track whether the timing adjustment or the infopacket changed
	 * relative to what the IRQ parameters / CRTC state last held.
	 */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7414 
/*
 * Derive the final VRR state/parameters from the CRTC's freesync config
 * and stash them (along with the config and active plane count) in
 * acrtc->dm_irq_params for use by the DM interrupt handlers.
 */
static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	/* dm_irq_params is shared with the interrupt handlers; guard with
	 * event_lock.
	 */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * if freesync compatible mode was set, config.state will be set
		 * in atomic check
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			/* Otherwise VRR follows the userspace enable bit. */
			config.state = new_crtc_state->base.vrr_enabled ?
						     VRR_STATE_ACTIVE_VARIABLE :
						     VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7477 
7478 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7479 					    struct dm_crtc_state *new_state)
7480 {
7481 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7482 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7483 
7484 	if (!old_vrr_active && new_vrr_active) {
7485 		/* Transition VRR inactive -> active:
7486 		 * While VRR is active, we must not disable vblank irq, as a
7487 		 * reenable after disable would compute bogus vblank/pflip
7488 		 * timestamps if it likely happened inside display front-porch.
7489 		 *
7490 		 * We also need vupdate irq for the actual core vblank handling
7491 		 * at end of vblank.
7492 		 */
7493 		dm_set_vupdate_irq(new_state->base.crtc, true);
7494 		drm_crtc_vblank_get(new_state->base.crtc);
7495 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7496 				 __func__, new_state->base.crtc->base.id);
7497 	} else if (old_vrr_active && !new_vrr_active) {
7498 		/* Transition VRR active -> inactive:
7499 		 * Allow vblank irq disable again for fixed refresh rate.
7500 		 */
7501 		dm_set_vupdate_irq(new_state->base.crtc, false);
7502 		drm_crtc_vblank_put(new_state->base.crtc);
7503 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7504 				 __func__, new_state->base.crtc->base.id);
7505 	}
7506 }
7507 
7508 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7509 {
7510 	struct drm_plane *plane;
7511 	struct drm_plane_state *old_plane_state;
7512 	int i;
7513 
7514 	/*
7515 	 * TODO: Make this per-stream so we don't issue redundant updates for
7516 	 * commits with multiple streams.
7517 	 */
7518 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
7519 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7520 			handle_cursor_update(plane, old_plane_state);
7521 }
7522 
7523 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7524 				    struct dc_state *dc_state,
7525 				    struct drm_device *dev,
7526 				    struct amdgpu_display_manager *dm,
7527 				    struct drm_crtc *pcrtc,
7528 				    bool wait_for_vblank)
7529 {
7530 	uint32_t i;
7531 	uint64_t timestamp_ns;
7532 	struct drm_plane *plane;
7533 	struct drm_plane_state *old_plane_state, *new_plane_state;
7534 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7535 	struct drm_crtc_state *new_pcrtc_state =
7536 			drm_atomic_get_new_crtc_state(state, pcrtc);
7537 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7538 	struct dm_crtc_state *dm_old_crtc_state =
7539 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7540 	int planes_count = 0, vpos, hpos;
7541 	unsigned long flags;
7542 	uint32_t target_vblank, last_flip_vblank;
7543 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7544 	bool cursor_update = false;
7545 	bool pflip_present = false;
7546 	struct {
7547 		struct dc_surface_update surface_updates[MAX_SURFACES];
7548 		struct dc_plane_info plane_infos[MAX_SURFACES];
7549 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7550 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7551 		struct dc_stream_update stream_update;
7552 	} *bundle;
7553 
7554 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7555 
7556 	if (!bundle) {
7557 		dm_error("Failed to allocate update bundle\n");
7558 		goto cleanup;
7559 	}
7560 
7561 	/*
7562 	 * Disable the cursor first if we're disabling all the planes.
7563 	 * It'll remain on the screen after the planes are re-enabled
7564 	 * if we don't.
7565 	 */
7566 	if (acrtc_state->active_planes == 0)
7567 		amdgpu_dm_commit_cursors(state);
7568 
7569 	/* update planes when needed */
7570 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7571 		struct drm_crtc *crtc = new_plane_state->crtc;
7572 		struct drm_crtc_state *new_crtc_state;
7573 		struct drm_framebuffer *fb = new_plane_state->fb;
7574 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7575 		bool plane_needs_flip;
7576 		struct dc_plane_state *dc_plane;
7577 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7578 
7579 		/* Cursor plane is handled after stream updates */
7580 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7581 			if ((fb && crtc == pcrtc) ||
7582 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
7583 				cursor_update = true;
7584 
7585 			continue;
7586 		}
7587 
7588 		if (!fb || !crtc || pcrtc != crtc)
7589 			continue;
7590 
7591 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7592 		if (!new_crtc_state->active)
7593 			continue;
7594 
7595 		dc_plane = dm_new_plane_state->dc_state;
7596 
7597 		bundle->surface_updates[planes_count].surface = dc_plane;
7598 		if (new_pcrtc_state->color_mgmt_changed) {
7599 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7600 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7601 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7602 		}
7603 
7604 		fill_dc_scaling_info(dm->adev, new_plane_state,
7605 				     &bundle->scaling_infos[planes_count]);
7606 
7607 		bundle->surface_updates[planes_count].scaling_info =
7608 			&bundle->scaling_infos[planes_count];
7609 
7610 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7611 
7612 		pflip_present = pflip_present || plane_needs_flip;
7613 
7614 		if (!plane_needs_flip) {
7615 			planes_count += 1;
7616 			continue;
7617 		}
7618 
7619 		fill_dc_plane_info_and_addr(
7620 			dm->adev, new_plane_state,
7621 			afb->tiling_flags,
7622 			&bundle->plane_infos[planes_count],
7623 			&bundle->flip_addrs[planes_count].address,
7624 			afb->tmz_surface, false);
7625 
7626 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
7627 				 new_plane_state->plane->index,
7628 				 bundle->plane_infos[planes_count].dcc.enable);
7629 
7630 		bundle->surface_updates[planes_count].plane_info =
7631 			&bundle->plane_infos[planes_count];
7632 
7633 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
7634 				    new_crtc_state,
7635 				    &bundle->flip_addrs[planes_count]);
7636 
7637 		/*
7638 		 * Only allow immediate flips for fast updates that don't
7639 		 * change FB pitch, DCC state, rotation or mirroing.
7640 		 */
7641 		bundle->flip_addrs[planes_count].flip_immediate =
7642 			crtc->state->async_flip &&
7643 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7644 
7645 		timestamp_ns = ktime_get_ns();
7646 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7647 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7648 		bundle->surface_updates[planes_count].surface = dc_plane;
7649 
7650 		if (!bundle->surface_updates[planes_count].surface) {
7651 			DRM_ERROR("No surface for CRTC: id=%d\n",
7652 					acrtc_attach->crtc_id);
7653 			continue;
7654 		}
7655 
7656 		if (plane == pcrtc->primary)
7657 			update_freesync_state_on_stream(
7658 				dm,
7659 				acrtc_state,
7660 				acrtc_state->stream,
7661 				dc_plane,
7662 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7663 
7664 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
7665 				 __func__,
7666 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7667 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7668 
7669 		planes_count += 1;
7670 
7671 	}
7672 
7673 	if (pflip_present) {
7674 		if (!vrr_active) {
7675 			/* Use old throttling in non-vrr fixed refresh rate mode
7676 			 * to keep flip scheduling based on target vblank counts
7677 			 * working in a backwards compatible way, e.g., for
7678 			 * clients using the GLX_OML_sync_control extension or
7679 			 * DRI3/Present extension with defined target_msc.
7680 			 */
7681 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7682 		}
7683 		else {
7684 			/* For variable refresh rate mode only:
7685 			 * Get vblank of last completed flip to avoid > 1 vrr
7686 			 * flips per video frame by use of throttling, but allow
7687 			 * flip programming anywhere in the possibly large
7688 			 * variable vrr vblank interval for fine-grained flip
7689 			 * timing control and more opportunity to avoid stutter
7690 			 * on late submission of flips.
7691 			 */
7692 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7693 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7694 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7695 		}
7696 
7697 		target_vblank = last_flip_vblank + wait_for_vblank;
7698 
7699 		/*
7700 		 * Wait until we're out of the vertical blank period before the one
7701 		 * targeted by the flip
7702 		 */
7703 		while ((acrtc_attach->enabled &&
7704 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7705 							    0, &vpos, &hpos, NULL,
7706 							    NULL, &pcrtc->hwmode)
7707 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7708 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7709 			(int)(target_vblank -
7710 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7711 			usleep_range(1000, 1100);
7712 		}
7713 
7714 		/**
7715 		 * Prepare the flip event for the pageflip interrupt to handle.
7716 		 *
7717 		 * This only works in the case where we've already turned on the
7718 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7719 		 * from 0 -> n planes we have to skip a hardware generated event
7720 		 * and rely on sending it from software.
7721 		 */
7722 		if (acrtc_attach->base.state->event &&
7723 		    acrtc_state->active_planes > 0) {
7724 			drm_crtc_vblank_get(pcrtc);
7725 
7726 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7727 
7728 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7729 			prepare_flip_isr(acrtc_attach);
7730 
7731 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7732 		}
7733 
7734 		if (acrtc_state->stream) {
7735 			if (acrtc_state->freesync_vrr_info_changed)
7736 				bundle->stream_update.vrr_infopacket =
7737 					&acrtc_state->stream->vrr_infopacket;
7738 		}
7739 	} else if (cursor_update && acrtc_state->active_planes > 0 &&
7740 		   acrtc_attach->base.state->event) {
7741 		drm_crtc_vblank_get(pcrtc);
7742 
7743 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7744 
7745 		acrtc_attach->event = acrtc_attach->base.state->event;
7746 		acrtc_attach->base.state->event = NULL;
7747 
7748 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7749 	}
7750 
7751 	/* Update the planes if changed or disable if we don't have any. */
7752 	if ((planes_count || acrtc_state->active_planes == 0) &&
7753 		acrtc_state->stream) {
7754 		/*
7755 		 * If PSR or idle optimizations are enabled then flush out
7756 		 * any pending work before hardware programming.
7757 		 */
7758 		if (dm->vblank_control_workqueue)
7759 			flush_workqueue(dm->vblank_control_workqueue);
7760 
7761 		bundle->stream_update.stream = acrtc_state->stream;
7762 		if (new_pcrtc_state->mode_changed) {
7763 			bundle->stream_update.src = acrtc_state->stream->src;
7764 			bundle->stream_update.dst = acrtc_state->stream->dst;
7765 		}
7766 
7767 		if (new_pcrtc_state->color_mgmt_changed) {
7768 			/*
7769 			 * TODO: This isn't fully correct since we've actually
7770 			 * already modified the stream in place.
7771 			 */
7772 			bundle->stream_update.gamut_remap =
7773 				&acrtc_state->stream->gamut_remap_matrix;
7774 			bundle->stream_update.output_csc_transform =
7775 				&acrtc_state->stream->csc_color_matrix;
7776 			bundle->stream_update.out_transfer_func =
7777 				acrtc_state->stream->out_transfer_func;
7778 		}
7779 
7780 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7781 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7782 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7783 
7784 		/*
7785 		 * If FreeSync state on the stream has changed then we need to
7786 		 * re-adjust the min/max bounds now that DC doesn't handle this
7787 		 * as part of commit.
7788 		 */
7789 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
7790 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7791 			dc_stream_adjust_vmin_vmax(
7792 				dm->dc, acrtc_state->stream,
7793 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7794 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7795 		}
7796 		mutex_lock(&dm->dc_lock);
7797 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7798 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7799 			amdgpu_dm_psr_disable(acrtc_state->stream);
7800 
7801 		dc_commit_updates_for_stream(dm->dc,
7802 						     bundle->surface_updates,
7803 						     planes_count,
7804 						     acrtc_state->stream,
7805 						     &bundle->stream_update,
7806 						     dc_state);
7807 
7808 		/**
7809 		 * Enable or disable the interrupts on the backend.
7810 		 *
7811 		 * Most pipes are put into power gating when unused.
7812 		 *
7813 		 * When power gating is enabled on a pipe we lose the
7814 		 * interrupt enablement state when power gating is disabled.
7815 		 *
7816 		 * So we need to update the IRQ control state in hardware
7817 		 * whenever the pipe turns on (since it could be previously
7818 		 * power gated) or off (since some pipes can't be power gated
7819 		 * on some ASICs).
7820 		 */
7821 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7822 			dm_update_pflip_irq_state(drm_to_adev(dev),
7823 						  acrtc_attach);
7824 
7825 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7826 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7827 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7828 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7829 
7830 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
7831 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
7832 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
7833 			struct amdgpu_dm_connector *aconn =
7834 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
7835 
7836 			if (aconn->psr_skip_count > 0)
7837 				aconn->psr_skip_count--;
7838 
7839 			/* Allow PSR when skip count is 0. */
7840 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
7841 
7842 			/*
7843 			 * If sink supports PSR SU, there is no need to rely on
7844 			 * a vblank event disable request to enable PSR. PSR SU
7845 			 * can be enabled immediately once OS demonstrates an
7846 			 * adequate number of fast atomic commits to notify KMD
7847 			 * of update events. See `vblank_control_worker()`.
7848 			 */
7849 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
7850 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
7851 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
7852 				amdgpu_dm_psr_enable(acrtc_state->stream);
7853 		} else {
7854 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
7855 		}
7856 
7857 		mutex_unlock(&dm->dc_lock);
7858 	}
7859 
7860 	/*
7861 	 * Update cursor state *after* programming all the planes.
7862 	 * This avoids redundant programming in the case where we're going
7863 	 * to be disabling a single plane - those pipes are being disabled.
7864 	 */
7865 	if (acrtc_state->active_planes)
7866 		amdgpu_dm_commit_cursors(state);
7867 
7868 cleanup:
7869 	kfree(bundle);
7870 }
7871 
7872 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7873 				   struct drm_atomic_state *state)
7874 {
7875 	struct amdgpu_device *adev = drm_to_adev(dev);
7876 	struct amdgpu_dm_connector *aconnector;
7877 	struct drm_connector *connector;
7878 	struct drm_connector_state *old_con_state, *new_con_state;
7879 	struct drm_crtc_state *new_crtc_state;
7880 	struct dm_crtc_state *new_dm_crtc_state;
7881 	const struct dc_stream_status *status;
7882 	int i, inst;
7883 
7884 	/* Notify device removals. */
7885 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7886 		if (old_con_state->crtc != new_con_state->crtc) {
7887 			/* CRTC changes require notification. */
7888 			goto notify;
7889 		}
7890 
7891 		if (!new_con_state->crtc)
7892 			continue;
7893 
7894 		new_crtc_state = drm_atomic_get_new_crtc_state(
7895 			state, new_con_state->crtc);
7896 
7897 		if (!new_crtc_state)
7898 			continue;
7899 
7900 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7901 			continue;
7902 
7903 	notify:
7904 		aconnector = to_amdgpu_dm_connector(connector);
7905 
7906 		mutex_lock(&adev->dm.audio_lock);
7907 		inst = aconnector->audio_inst;
7908 		aconnector->audio_inst = -1;
7909 		mutex_unlock(&adev->dm.audio_lock);
7910 
7911 		amdgpu_dm_audio_eld_notify(adev, inst);
7912 	}
7913 
7914 	/* Notify audio device additions. */
7915 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7916 		if (!new_con_state->crtc)
7917 			continue;
7918 
7919 		new_crtc_state = drm_atomic_get_new_crtc_state(
7920 			state, new_con_state->crtc);
7921 
7922 		if (!new_crtc_state)
7923 			continue;
7924 
7925 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7926 			continue;
7927 
7928 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7929 		if (!new_dm_crtc_state->stream)
7930 			continue;
7931 
7932 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7933 		if (!status)
7934 			continue;
7935 
7936 		aconnector = to_amdgpu_dm_connector(connector);
7937 
7938 		mutex_lock(&adev->dm.audio_lock);
7939 		inst = status->audio_inst;
7940 		aconnector->audio_inst = inst;
7941 		mutex_unlock(&adev->dm.audio_lock);
7942 
7943 		amdgpu_dm_audio_eld_notify(adev, inst);
7944 	}
7945 }
7946 
7947 /*
7948  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7949  * @crtc_state: the DRM CRTC state
7950  * @stream_state: the DC stream state.
7951  *
7952  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7953  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7954  */
7955 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7956 						struct dc_stream_state *stream_state)
7957 {
7958 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7959 }
7960 
7961 /**
7962  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7963  * @state: The atomic state to commit
7964  *
7965  * This will tell DC to commit the constructed DC state from atomic_check,
7966  * programming the hardware. Any failures here implies a hardware failure, since
7967  * atomic check should have filtered anything non-kosher.
7968  */
7969 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7970 {
7971 	struct drm_device *dev = state->dev;
7972 	struct amdgpu_device *adev = drm_to_adev(dev);
7973 	struct amdgpu_display_manager *dm = &adev->dm;
7974 	struct dm_atomic_state *dm_state;
7975 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7976 	uint32_t i, j;
7977 	struct drm_crtc *crtc;
7978 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7979 	unsigned long flags;
7980 	bool wait_for_vblank = true;
7981 	struct drm_connector *connector;
7982 	struct drm_connector_state *old_con_state, *new_con_state;
7983 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7984 	int crtc_disable_count = 0;
7985 	bool mode_set_reset_required = false;
7986 	int r;
7987 
7988 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
7989 
7990 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
7991 	if (unlikely(r))
7992 		DRM_ERROR("Waiting for fences timed out!");
7993 
7994 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7995 
7996 	dm_state = dm_atomic_get_new_state(state);
7997 	if (dm_state && dm_state->context) {
7998 		dc_state = dm_state->context;
7999 	} else {
8000 		/* No state changes, retain current state. */
8001 		dc_state_temp = dc_create_state(dm->dc);
8002 		ASSERT(dc_state_temp);
8003 		dc_state = dc_state_temp;
8004 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8005 	}
8006 
8007 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8008 				       new_crtc_state, i) {
8009 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8010 
8011 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8012 
8013 		if (old_crtc_state->active &&
8014 		    (!new_crtc_state->active ||
8015 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8016 			manage_dm_interrupts(adev, acrtc, false);
8017 			dc_stream_release(dm_old_crtc_state->stream);
8018 		}
8019 	}
8020 
8021 	drm_atomic_helper_calc_timestamping_constants(state);
8022 
8023 	/* update changed items */
8024 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8025 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8026 
8027 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8028 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8029 
8030 		drm_dbg_state(state->dev,
8031 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8032 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8033 			"connectors_changed:%d\n",
8034 			acrtc->crtc_id,
8035 			new_crtc_state->enable,
8036 			new_crtc_state->active,
8037 			new_crtc_state->planes_changed,
8038 			new_crtc_state->mode_changed,
8039 			new_crtc_state->active_changed,
8040 			new_crtc_state->connectors_changed);
8041 
8042 		/* Disable cursor if disabling crtc */
8043 		if (old_crtc_state->active && !new_crtc_state->active) {
8044 			struct dc_cursor_position position;
8045 
8046 			memset(&position, 0, sizeof(position));
8047 			mutex_lock(&dm->dc_lock);
8048 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8049 			mutex_unlock(&dm->dc_lock);
8050 		}
8051 
8052 		/* Copy all transient state flags into dc state */
8053 		if (dm_new_crtc_state->stream) {
8054 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8055 							    dm_new_crtc_state->stream);
8056 		}
8057 
8058 		/* handles headless hotplug case, updating new_state and
8059 		 * aconnector as needed
8060 		 */
8061 
8062 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8063 
8064 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8065 
8066 			if (!dm_new_crtc_state->stream) {
8067 				/*
8068 				 * this could happen because of issues with
8069 				 * userspace notifications delivery.
8070 				 * In this case userspace tries to set mode on
8071 				 * display which is disconnected in fact.
8072 				 * dc_sink is NULL in this case on aconnector.
8073 				 * We expect reset mode will come soon.
8074 				 *
8075 				 * This can also happen when unplug is done
8076 				 * during resume sequence ended
8077 				 *
8078 				 * In this case, we want to pretend we still
8079 				 * have a sink to keep the pipe running so that
8080 				 * hw state is consistent with the sw state
8081 				 */
8082 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8083 						__func__, acrtc->base.base.id);
8084 				continue;
8085 			}
8086 
8087 			if (dm_old_crtc_state->stream)
8088 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8089 
8090 			pm_runtime_get_noresume(dev->dev);
8091 
8092 			acrtc->enabled = true;
8093 			acrtc->hw_mode = new_crtc_state->mode;
8094 			crtc->hwmode = new_crtc_state->mode;
8095 			mode_set_reset_required = true;
8096 		} else if (modereset_required(new_crtc_state)) {
8097 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8098 			/* i.e. reset mode */
8099 			if (dm_old_crtc_state->stream)
8100 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8101 
8102 			mode_set_reset_required = true;
8103 		}
8104 	} /* for_each_crtc_in_state() */
8105 
8106 	if (dc_state) {
8107 		/* if there mode set or reset, disable eDP PSR */
8108 		if (mode_set_reset_required) {
8109 			if (dm->vblank_control_workqueue)
8110 				flush_workqueue(dm->vblank_control_workqueue);
8111 
8112 			amdgpu_dm_psr_disable_all(dm);
8113 		}
8114 
8115 		dm_enable_per_frame_crtc_master_sync(dc_state);
8116 		mutex_lock(&dm->dc_lock);
8117 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8118 
8119 		/* Allow idle optimization when vblank count is 0 for display off */
8120 		if (dm->active_vblank_irq_count == 0)
8121 			dc_allow_idle_optimizations(dm->dc, true);
8122 		mutex_unlock(&dm->dc_lock);
8123 	}
8124 
8125 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8126 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8127 
8128 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8129 
8130 		if (dm_new_crtc_state->stream != NULL) {
8131 			const struct dc_stream_status *status =
8132 					dc_stream_get_status(dm_new_crtc_state->stream);
8133 
8134 			if (!status)
8135 				status = dc_stream_get_status_from_state(dc_state,
8136 									 dm_new_crtc_state->stream);
8137 			if (!status)
8138 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8139 			else
8140 				acrtc->otg_inst = status->primary_otg_inst;
8141 		}
8142 	}
8143 #ifdef CONFIG_DRM_AMD_DC_HDCP
8144 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8145 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8146 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8147 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8148 
8149 		new_crtc_state = NULL;
8150 
8151 		if (acrtc)
8152 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8153 
8154 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8155 
8156 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8157 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8158 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8159 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8160 			dm_new_con_state->update_hdcp = true;
8161 			continue;
8162 		}
8163 
8164 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8165 			hdcp_update_display(
8166 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8167 				new_con_state->hdcp_content_type,
8168 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8169 	}
8170 #endif
8171 
8172 	/* Handle connector state changes */
8173 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8174 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8175 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8176 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8177 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8178 		struct dc_stream_update stream_update;
8179 		struct dc_info_packet hdr_packet;
8180 		struct dc_stream_status *status = NULL;
8181 		bool abm_changed, hdr_changed, scaling_changed;
8182 
8183 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8184 		memset(&stream_update, 0, sizeof(stream_update));
8185 
8186 		if (acrtc) {
8187 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8188 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8189 		}
8190 
8191 		/* Skip any modesets/resets */
8192 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8193 			continue;
8194 
8195 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8196 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8197 
8198 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8199 							     dm_old_con_state);
8200 
8201 		abm_changed = dm_new_crtc_state->abm_level !=
8202 			      dm_old_crtc_state->abm_level;
8203 
8204 		hdr_changed =
8205 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8206 
8207 		if (!scaling_changed && !abm_changed && !hdr_changed)
8208 			continue;
8209 
8210 		stream_update.stream = dm_new_crtc_state->stream;
8211 		if (scaling_changed) {
8212 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8213 					dm_new_con_state, dm_new_crtc_state->stream);
8214 
8215 			stream_update.src = dm_new_crtc_state->stream->src;
8216 			stream_update.dst = dm_new_crtc_state->stream->dst;
8217 		}
8218 
8219 		if (abm_changed) {
8220 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8221 
8222 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8223 		}
8224 
8225 		if (hdr_changed) {
8226 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8227 			stream_update.hdr_static_metadata = &hdr_packet;
8228 		}
8229 
8230 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8231 
8232 		if (WARN_ON(!status))
8233 			continue;
8234 
8235 		WARN_ON(!status->plane_count);
8236 
8237 		/*
8238 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8239 		 * Here we create an empty update on each plane.
8240 		 * To fix this, DC should permit updating only stream properties.
8241 		 */
8242 		for (j = 0; j < status->plane_count; j++)
8243 			dummy_updates[j].surface = status->plane_states[0];
8244 
8245 
8246 		mutex_lock(&dm->dc_lock);
8247 		dc_commit_updates_for_stream(dm->dc,
8248 						     dummy_updates,
8249 						     status->plane_count,
8250 						     dm_new_crtc_state->stream,
8251 						     &stream_update,
8252 						     dc_state);
8253 		mutex_unlock(&dm->dc_lock);
8254 	}
8255 
8256 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8257 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8258 				      new_crtc_state, i) {
8259 		if (old_crtc_state->active && !new_crtc_state->active)
8260 			crtc_disable_count++;
8261 
8262 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8263 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8264 
8265 		/* For freesync config update on crtc state and params for irq */
8266 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8267 
8268 		/* Handle vrr on->off / off->on transitions */
8269 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8270 						dm_new_crtc_state);
8271 	}
8272 
8273 	/**
8274 	 * Enable interrupts for CRTCs that are newly enabled or went through
8275 	 * a modeset. It was intentionally deferred until after the front end
8276 	 * state was modified to wait until the OTG was on and so the IRQ
8277 	 * handlers didn't access stale or invalid state.
8278 	 */
8279 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8280 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8281 #ifdef CONFIG_DEBUG_FS
8282 		bool configure_crc = false;
8283 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8284 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8285 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
8286 #endif
8287 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8288 		cur_crc_src = acrtc->dm_irq_params.crc_src;
8289 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8290 #endif
8291 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8292 
8293 		if (new_crtc_state->active &&
8294 		    (!old_crtc_state->active ||
8295 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8296 			dc_stream_retain(dm_new_crtc_state->stream);
8297 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8298 			manage_dm_interrupts(adev, acrtc, true);
8299 
8300 #ifdef CONFIG_DEBUG_FS
8301 			/**
8302 			 * Frontend may have changed so reapply the CRC capture
8303 			 * settings for the stream.
8304 			 */
8305 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8306 
8307 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8308 				configure_crc = true;
8309 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8310 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
8311 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8312 					acrtc->dm_irq_params.crc_window.update_win = true;
8313 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
8314 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
8315 					crc_rd_wrk->crtc = crtc;
8316 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
8317 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8318 				}
8319 #endif
8320 			}
8321 
8322 			if (configure_crc)
8323 				if (amdgpu_dm_crtc_configure_crc_source(
8324 					crtc, dm_new_crtc_state, cur_crc_src))
8325 					DRM_DEBUG_DRIVER("Failed to configure crc source");
8326 #endif
8327 		}
8328 	}
8329 
8330 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8331 		if (new_crtc_state->async_flip)
8332 			wait_for_vblank = false;
8333 
8334 	/* update planes when needed per crtc*/
8335 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8336 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8337 
8338 		if (dm_new_crtc_state->stream)
8339 			amdgpu_dm_commit_planes(state, dc_state, dev,
8340 						dm, crtc, wait_for_vblank);
8341 	}
8342 
8343 	/* Update audio instances for each connector. */
8344 	amdgpu_dm_commit_audio(dev, state);
8345 
8346 	/* restore the backlight level */
8347 	for (i = 0; i < dm->num_of_edps; i++) {
8348 		if (dm->backlight_dev[i] &&
8349 		    (dm->actual_brightness[i] != dm->brightness[i]))
8350 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8351 	}
8352 
8353 	/*
8354 	 * send vblank event on all events not handled in flip and
8355 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8356 	 */
8357 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8358 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8359 
8360 		if (new_crtc_state->event)
8361 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8362 
8363 		new_crtc_state->event = NULL;
8364 	}
8365 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8366 
8367 	/* Signal HW programming completion */
8368 	drm_atomic_helper_commit_hw_done(state);
8369 
8370 	if (wait_for_vblank)
8371 		drm_atomic_helper_wait_for_flip_done(dev, state);
8372 
8373 	drm_atomic_helper_cleanup_planes(dev, state);
8374 
8375 	/* return the stolen vga memory back to VRAM */
8376 	if (!adev->mman.keep_stolen_vga_memory)
8377 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8378 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8379 
8380 	/*
8381 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8382 	 * so we can put the GPU into runtime suspend if we're not driving any
8383 	 * displays anymore
8384 	 */
8385 	for (i = 0; i < crtc_disable_count; i++)
8386 		pm_runtime_put_autosuspend(dev->dev);
8387 	pm_runtime_mark_last_busy(dev->dev);
8388 
8389 	if (dc_state_temp)
8390 		dc_release_state(dc_state_temp);
8391 }
8392 
8393 
/*
 * Build and commit a minimal atomic state that re-programs the connector,
 * its current CRTC and that CRTC's primary plane.
 *
 * Used to restore the previous display configuration when no usermode
 * commit is expected (e.g. a replug on the same port). The caller must
 * hold the relevant modeset locks; the acquire context is taken from
 * dev->mode_config.acquire_ctx.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	/* NOTE(review): assumes connector->encoder->crtc is non-NULL — the
	 * caller (dm_restore_drm_connector_state) guarantees the encoder;
	 * verify the crtc if new callers appear.
	 */
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore previous display setting */

	/*
	 * Attach connectors to drm_atomic_state
	 */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto out;

	/* Attach crtc to drm_atomic_state*/
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto out;

	/* force a restore */
	crtc_state->mode_changed = true;

	/* Attach plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto out;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);

out:
	/* Drop our reference; commit (if reached) holds its own. */
	drm_atomic_state_put(state);
	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);

	return ret;
}
8448 
8449 /*
8450  * This function handles all cases when set mode does not come upon hotplug.
8451  * This includes when a display is unplugged then plugged back into the
8452  * same port and when running without usermode desktop manager supprot
8453  */
8454 void dm_restore_drm_connector_state(struct drm_device *dev,
8455 				    struct drm_connector *connector)
8456 {
8457 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8458 	struct amdgpu_crtc *disconnected_acrtc;
8459 	struct dm_crtc_state *acrtc_state;
8460 
8461 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8462 		return;
8463 
8464 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8465 	if (!disconnected_acrtc)
8466 		return;
8467 
8468 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8469 	if (!acrtc_state->stream)
8470 		return;
8471 
8472 	/*
8473 	 * If the previous sink is not released and different from the current,
8474 	 * we deduce we are in a state where we can not rely on usermode call
8475 	 * to turn on the display, so we do it here
8476 	 */
8477 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8478 		dm_force_atomic_commit(&aconnector->base);
8479 }
8480 
8481 /*
8482  * Grabs all modesetting locks to serialize against any blocking commits,
8483  * Waits for completion of all non blocking commits.
8484  */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Add all modeset locks to the acquire_ctx so that the framework,
	 * when it releases the context, will also release the extra locks
	 * we take here.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Grab a reference on the newest pending commit, under the
		 * commit lock so it cannot be freed from under us.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 means the 10 second timeout elapsed; log and keep
		 * going so remaining commits still get their reference put.
		 */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/* ret < 0: interrupted wait; timeouts are reported but not fatal. */
	return ret < 0 ? ret : 0;
}
8531 
8532 static void get_freesync_config_for_crtc(
8533 	struct dm_crtc_state *new_crtc_state,
8534 	struct dm_connector_state *new_con_state)
8535 {
8536 	struct mod_freesync_config config = {0};
8537 	struct amdgpu_dm_connector *aconnector =
8538 			to_amdgpu_dm_connector(new_con_state->base.connector);
8539 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8540 	int vrefresh = drm_mode_vrefresh(mode);
8541 	bool fs_vid_mode = false;
8542 
8543 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8544 					vrefresh >= aconnector->min_vfreq &&
8545 					vrefresh <= aconnector->max_vfreq;
8546 
8547 	if (new_crtc_state->vrr_supported) {
8548 		new_crtc_state->stream->ignore_msa_timing_param = true;
8549 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
8550 
8551 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
8552 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
8553 		config.vsif_supported = true;
8554 		config.btr = true;
8555 
8556 		if (fs_vid_mode) {
8557 			config.state = VRR_STATE_ACTIVE_FIXED;
8558 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
8559 			goto out;
8560 		} else if (new_crtc_state->base.vrr_enabled) {
8561 			config.state = VRR_STATE_ACTIVE_VARIABLE;
8562 		} else {
8563 			config.state = VRR_STATE_INACTIVE;
8564 		}
8565 	}
8566 out:
8567 	new_crtc_state->freesync_config = config;
8568 }
8569 
/*
 * Mark VRR as unsupported on the CRTC and wipe any stale VRR infoframe
 * data, so a later enable starts from a clean slate.
 */
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
8578 
/*
 * Decide whether a mode change touches only the vertical front porch —
 * the transition freesync video modes use to switch refresh rate without
 * a full modeset.
 *
 * Note the deliberate mix of comparisons below: pixel clock, all
 * horizontal timings, hskew and vscan must be IDENTICAL, while vtotal,
 * vsync_start and vsync_end must DIFFER (the refresh rate really changed),
 * with the vsync pulse width (vsync_end - vsync_start) preserved. That
 * combination means only the vertical front porch moved. The `!=` checks
 * are intentional, not typos.
 *
 * Returns true when the change qualifies; false otherwise (including when
 * either state is NULL).
 */
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_mode, *new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = &old_crtc_state->mode;
	new_mode = &new_crtc_state->mode;

	if (old_mode->clock       == new_mode->clock &&
	    old_mode->hdisplay    == new_mode->hdisplay &&
	    old_mode->vdisplay    == new_mode->vdisplay &&
	    old_mode->htotal      == new_mode->htotal &&
	    old_mode->vtotal      != new_mode->vtotal &&
	    old_mode->hsync_start == new_mode->hsync_start &&
	    old_mode->vsync_start != new_mode->vsync_start &&
	    old_mode->hsync_end   == new_mode->hsync_end &&
	    old_mode->vsync_end   != new_mode->vsync_end &&
	    old_mode->hskew       == new_mode->hskew &&
	    old_mode->vscan       == new_mode->vscan &&
	    (old_mode->vsync_end - old_mode->vsync_start) ==
	    (new_mode->vsync_end - new_mode->vsync_start))
		return true;

	return false;
}
8608 
8609 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
8610 	uint64_t num, den, res;
8611 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
8612 
8613 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
8614 
8615 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
8616 	den = (unsigned long long)new_crtc_state->mode.htotal *
8617 	      (unsigned long long)new_crtc_state->mode.vtotal;
8618 
8619 	res = div_u64(num, den);
8620 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
8621 }
8622 
/*
 * dm_update_crtc_state() - Add or remove the DC stream for one CRTC during
 * atomic check.
 * @dm: display manager of the device
 * @state: the DRM atomic state being checked
 * @crtc: the CRTC being processed
 * @old_crtc_state: CRTC state being switched away from
 * @new_crtc_state: CRTC state being switched to
 * @enable: false for the removal pass (changed/disabled CRTCs),
 *          true for the addition pass (updated/enabled CRTCs)
 * @lock_and_validation_needed: set to true when the change requires taking
 *          the global lock and running full DC validation later on
 *
 * Called twice per CRTC from atomic check: once with @enable == false to
 * remove stale streams from the DC context, then with @enable == true to
 * add the new ones. Also applies stream updates (scaling, ABM, color
 * management, freesync) that do not need a full modeset.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
			 struct drm_atomic_state *state,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state,
			 bool enable,
			 bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/* Build (and validate) the candidate stream for the new mode.
		 * We hold a reference that is dropped at skip_modeset/fail.
		 */
		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		/* Freesync video: only the vertical front porch changed, so
		 * keep the stream and switch to a fixed refresh rate instead
		 * of doing a full modeset.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, "
				"setting mode_changed to %d",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
				set_freesync_fixed_config(dm_new_crtc_state);
			}
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector)
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			/* Transfer ownership: the CRTC state now holds its
			 * own reference on the new stream.
			 */
			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
				drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
8886 
8887 static bool should_reset_plane(struct drm_atomic_state *state,
8888 			       struct drm_plane *plane,
8889 			       struct drm_plane_state *old_plane_state,
8890 			       struct drm_plane_state *new_plane_state)
8891 {
8892 	struct drm_plane *other;
8893 	struct drm_plane_state *old_other_state, *new_other_state;
8894 	struct drm_crtc_state *new_crtc_state;
8895 	int i;
8896 
8897 	/*
8898 	 * TODO: Remove this hack once the checks below are sufficient
8899 	 * enough to determine when we need to reset all the planes on
8900 	 * the stream.
8901 	 */
8902 	if (state->allow_modeset)
8903 		return true;
8904 
8905 	/* Exit early if we know that we're adding or removing the plane. */
8906 	if (old_plane_state->crtc != new_plane_state->crtc)
8907 		return true;
8908 
8909 	/* old crtc == new_crtc == NULL, plane not in context. */
8910 	if (!new_plane_state->crtc)
8911 		return false;
8912 
8913 	new_crtc_state =
8914 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8915 
8916 	if (!new_crtc_state)
8917 		return true;
8918 
8919 	/* CRTC Degamma changes currently require us to recreate planes. */
8920 	if (new_crtc_state->color_mgmt_changed)
8921 		return true;
8922 
8923 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8924 		return true;
8925 
8926 	/*
8927 	 * If there are any new primary or overlay planes being added or
8928 	 * removed then the z-order can potentially change. To ensure
8929 	 * correct z-order and pipe acquisition the current DC architecture
8930 	 * requires us to remove and recreate all existing planes.
8931 	 *
8932 	 * TODO: Come up with a more elegant solution for this.
8933 	 */
8934 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8935 		struct amdgpu_framebuffer *old_afb, *new_afb;
8936 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8937 			continue;
8938 
8939 		if (old_other_state->crtc != new_plane_state->crtc &&
8940 		    new_other_state->crtc != new_plane_state->crtc)
8941 			continue;
8942 
8943 		if (old_other_state->crtc != new_other_state->crtc)
8944 			return true;
8945 
8946 		/* Src/dst size and scaling updates. */
8947 		if (old_other_state->src_w != new_other_state->src_w ||
8948 		    old_other_state->src_h != new_other_state->src_h ||
8949 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8950 		    old_other_state->crtc_h != new_other_state->crtc_h)
8951 			return true;
8952 
8953 		/* Rotation / mirroring updates. */
8954 		if (old_other_state->rotation != new_other_state->rotation)
8955 			return true;
8956 
8957 		/* Blending updates. */
8958 		if (old_other_state->pixel_blend_mode !=
8959 		    new_other_state->pixel_blend_mode)
8960 			return true;
8961 
8962 		/* Alpha updates. */
8963 		if (old_other_state->alpha != new_other_state->alpha)
8964 			return true;
8965 
8966 		/* Colorspace changes. */
8967 		if (old_other_state->color_range != new_other_state->color_range ||
8968 		    old_other_state->color_encoding != new_other_state->color_encoding)
8969 			return true;
8970 
8971 		/* Framebuffer checks fall at the end. */
8972 		if (!old_other_state->fb || !new_other_state->fb)
8973 			continue;
8974 
8975 		/* Pixel format changes can require bandwidth updates. */
8976 		if (old_other_state->fb->format != new_other_state->fb->format)
8977 			return true;
8978 
8979 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8980 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8981 
8982 		/* Tiling and DCC changes also require bandwidth updates. */
8983 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8984 		    old_afb->base.modifier != new_afb->base.modifier)
8985 			return true;
8986 	}
8987 
8988 	return false;
8989 }
8990 
8991 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8992 			      struct drm_plane_state *new_plane_state,
8993 			      struct drm_framebuffer *fb)
8994 {
8995 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8996 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8997 	unsigned int pitch;
8998 	bool linear;
8999 
9000 	if (fb->width > new_acrtc->max_cursor_width ||
9001 	    fb->height > new_acrtc->max_cursor_height) {
9002 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9003 				 new_plane_state->fb->width,
9004 				 new_plane_state->fb->height);
9005 		return -EINVAL;
9006 	}
9007 	if (new_plane_state->src_w != fb->width << 16 ||
9008 	    new_plane_state->src_h != fb->height << 16) {
9009 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9010 		return -EINVAL;
9011 	}
9012 
9013 	/* Pitch in pixels */
9014 	pitch = fb->pitches[0] / fb->format->cpp[0];
9015 
9016 	if (fb->width != pitch) {
9017 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9018 				 fb->width, pitch);
9019 		return -EINVAL;
9020 	}
9021 
9022 	switch (pitch) {
9023 	case 64:
9024 	case 128:
9025 	case 256:
9026 		/* FB pitch is supported by cursor plane */
9027 		break;
9028 	default:
9029 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9030 		return -EINVAL;
9031 	}
9032 
9033 	/* Core DRM takes care of checking FB modifiers, so we only need to
9034 	 * check tiling flags when the FB doesn't have a modifier. */
9035 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9036 		if (adev->family < AMDGPU_FAMILY_AI) {
9037 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9038 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9039 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9040 		} else {
9041 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9042 		}
9043 		if (!linear) {
9044 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9045 			return -EINVAL;
9046 		}
9047 	}
9048 
9049 	return 0;
9050 }
9051 
/*
 * dm_update_plane_state() - Add or remove the DC plane state for one plane
 * during atomic check.
 * @dc: DC instance
 * @state: the DRM atomic state being checked
 * @plane: the plane being processed
 * @old_plane_state: plane state being switched away from
 * @new_plane_state: plane state being switched to
 * @enable: false for the removal pass, true for the addition pass
 * @lock_and_validation_needed: set to true when the change requires taking
 *          the global lock and running full DC validation later on
 *
 * Cursor planes are only sanity-checked here (they have no DC plane state
 * of their own). For other planes, the old DC plane state is removed from
 * the DC context on the disable pass and a freshly filled one is added on
 * the enable pass; ownership of the created dc_plane_state passes to the
 * DRM plane state on success.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		/* Cursor planes carry no DC state; just validate the FB on
		 * the enable pass.
		 */
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
9217 
9218 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9219 				       int *src_w, int *src_h)
9220 {
9221 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9222 	case DRM_MODE_ROTATE_90:
9223 	case DRM_MODE_ROTATE_270:
9224 		*src_w = plane_state->src_h >> 16;
9225 		*src_h = plane_state->src_w >> 16;
9226 		break;
9227 	case DRM_MODE_ROTATE_0:
9228 	case DRM_MODE_ROTATE_180:
9229 	default:
9230 		*src_w = plane_state->src_w >> 16;
9231 		*src_h = plane_state->src_h >> 16;
9232 		break;
9233 	}
9234 }
9235 
9236 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9237 				struct drm_crtc *crtc,
9238 				struct drm_crtc_state *new_crtc_state)
9239 {
9240 	struct drm_plane *cursor = crtc->cursor, *underlying;
9241 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
9242 	int i;
9243 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9244 	int cursor_src_w, cursor_src_h;
9245 	int underlying_src_w, underlying_src_h;
9246 
9247 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9248 	 * cursor per pipe but it's going to inherit the scaling and
9249 	 * positioning from the underlying pipe. Check the cursor plane's
9250 	 * blending properties match the underlying planes'. */
9251 
9252 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9253 	if (!new_cursor_state || !new_cursor_state->fb) {
9254 		return 0;
9255 	}
9256 
9257 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9258 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9259 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
9260 
9261 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9262 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
9263 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9264 			continue;
9265 
9266 		/* Ignore disabled planes */
9267 		if (!new_underlying_state->fb)
9268 			continue;
9269 
9270 		dm_get_oriented_plane_size(new_underlying_state,
9271 					   &underlying_src_w, &underlying_src_h);
9272 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9273 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9274 
9275 		if (cursor_scale_w != underlying_scale_w ||
9276 		    cursor_scale_h != underlying_scale_h) {
9277 			drm_dbg_atomic(crtc->dev,
9278 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9279 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9280 			return -EINVAL;
9281 		}
9282 
9283 		/* If this plane covers the whole CRTC, no need to check planes underneath */
9284 		if (new_underlying_state->crtc_x <= 0 &&
9285 		    new_underlying_state->crtc_y <= 0 &&
9286 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9287 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9288 			break;
9289 	}
9290 
9291 	return 0;
9292 }
9293 
9294 #if defined(CONFIG_DRM_AMD_DC_DCN)
9295 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9296 {
9297 	struct drm_connector *connector;
9298 	struct drm_connector_state *conn_state, *old_conn_state;
9299 	struct amdgpu_dm_connector *aconnector = NULL;
9300 	int i;
9301 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9302 		if (!conn_state->crtc)
9303 			conn_state = old_conn_state;
9304 
9305 		if (conn_state->crtc != crtc)
9306 			continue;
9307 
9308 		aconnector = to_amdgpu_dm_connector(connector);
9309 		if (!aconnector->port || !aconnector->mst_port)
9310 			aconnector = NULL;
9311 		else
9312 			break;
9313 	}
9314 
9315 	if (!aconnector)
9316 		return 0;
9317 
9318 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9319 }
9320 #endif
9321 
9322 /**
9323  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9324  *
9325  * @dev: The DRM device
9326  * @state: The atomic state to commit
9327  *
9328  * Validate that the given atomic state is programmable by DC into hardware.
9329  * This involves constructing a &struct dc_state reflecting the new hardware
9330  * state we wish to commit, then querying DC to see if it is programmable. It's
9331  * important not to modify the existing DC state. Otherwise, atomic_check
9332  * may unexpectedly commit hardware changes.
9333  *
9334  * When validating the DC state, it's important that the right locks are
9335  * acquired. For full updates case which removes/adds/updates streams on one
9336  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9337  * that any such full update commit will wait for completion of any outstanding
9338  * flip using DRMs synchronization events.
9339  *
9340  * Note that DM adds the affected connectors for all CRTCs in state, when that
9341  * might not seem necessary. This is because DC stream creation requires the
9342  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9343  * be possible but non-trivial - a possible TODO item.
9344  *
9345  * Return: -Error code if validation failed.
9346  */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	/* Set whenever a change requires DC global validation (full update). */
	bool lock_and_validation_needed = false;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_topology_mgr *mgr;
#endif

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of modeset already. */
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		/*
		 * An ABM (adaptive backlight) level change must flow through the
		 * CRTC as a connector change so the stream gets reprogrammed.
		 */
		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (dc_resource_is_dsc_encoding_supported(dc)) {
		/*
		 * A modeset on one MST DSC display can change the compression
		 * config of its siblings, so pull those CRTCs into the state.
		 */
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret) {
					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
					goto fail;
				}
			}
		}
		/* Pre-compute DSC configs before the per-CRTC checks below. */
		if (!pre_validate_dsc(state, &dm_state, vars)) {
			ret = -EINVAL;
			goto fail;
		}
	}
#endif
	/*
	 * For every CRTC with a non-trivial change, validate LUT sizes and pull
	 * its connectors and planes into the atomic state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
			dm_old_crtc_state->dsc_force_changed == false)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
			goto fail;
		}

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
			goto fail;
		}

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
			goto fail;
		}

		/* A forced DSC change (debugfs) must be treated as a modeset. */
		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
				goto fail;
			}
		}
	}

	/* Remove exiting planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor planes scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes*/
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w\o causing full reset. Need to
	 * decide how to handle.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip any thing not scale or underscan changes */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		if (!mgr->mst_state )
			continue;

		/* Find the root connector that owns this MST manager. */
		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			int id = connector->index;

			if (id == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);

	}
#endif
	/**
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
		}

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}
		/* Ask DC whether the constructed context is programmable. */
		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
							 UPDATE_TYPE_FULL :
							 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}
9759 
9760 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9761 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9762 {
9763 	uint8_t dpcd_data;
9764 	bool capable = false;
9765 
9766 	if (amdgpu_dm_connector->dc_link &&
9767 		dm_helpers_dp_read_dpcd(
9768 				NULL,
9769 				amdgpu_dm_connector->dc_link,
9770 				DP_DOWN_STREAM_PORT_COUNT,
9771 				&dpcd_data,
9772 				sizeof(dpcd_data))) {
9773 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9774 	}
9775 
9776 	return capable;
9777 }
9778 
/*
 * dm_edid_parser_send_cea() - send one chunk of an EDID CEA extension block
 * to DMUB firmware for parsing and interpret the reply.
 *
 * @dm:           display manager owning the DMUB service
 * @offset:       byte offset of this chunk within the CEA block
 * @total_length: total length of the CEA block being streamed
 * @data:         pointer to the chunk payload
 * @length:       chunk length; must not exceed DMUB_EDID_CEA_DATA_CHUNK_BYTES
 * @vsdb:         filled with FreeSync info when DMUB reports an AMD VSDB
 *
 * Returns true on a successful round-trip (ack or AMD VSDB found), false on
 * command failure, oversized chunk, missing VSDB, or an unknown reply type.
 */
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	/* Synchronous round-trip: the reply is written back into cmd. */
	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		/*
		 * NOTE(review): an unsuccessful ack is only logged here and the
		 * function still returns true, so streaming continues — confirm
		 * this is intentional rather than a swallowed error.
		 */
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
					output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}
9835 
9836 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
9837 		uint8_t *edid_ext, int len,
9838 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9839 {
9840 	int i;
9841 
9842 	/* send extension block to DMCU for parsing */
9843 	for (i = 0; i < len; i += 8) {
9844 		bool res;
9845 		int offset;
9846 
9847 		/* send 8 bytes a time */
9848 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
9849 			return false;
9850 
9851 		if (i+8 == len) {
9852 			/* EDID block sent completed, expect result */
9853 			int version, min_rate, max_rate;
9854 
9855 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
9856 			if (res) {
9857 				/* amd vsdb found */
9858 				vsdb_info->freesync_supported = 1;
9859 				vsdb_info->amd_vsdb_version = version;
9860 				vsdb_info->min_refresh_rate_hz = min_rate;
9861 				vsdb_info->max_refresh_rate_hz = max_rate;
9862 				return true;
9863 			}
9864 			/* not amd vsdb */
9865 			return false;
9866 		}
9867 
9868 		/* check for ack*/
9869 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
9870 		if (!res)
9871 			return false;
9872 	}
9873 
9874 	return false;
9875 }
9876 
9877 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
9878 		uint8_t *edid_ext, int len,
9879 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9880 {
9881 	int i;
9882 
9883 	/* send extension block to DMCU for parsing */
9884 	for (i = 0; i < len; i += 8) {
9885 		/* send 8 bytes a time */
9886 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
9887 			return false;
9888 	}
9889 
9890 	return vsdb_info->freesync_supported;
9891 }
9892 
9893 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
9894 		uint8_t *edid_ext, int len,
9895 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
9896 {
9897 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
9898 
9899 	if (adev->dm.dmub_srv)
9900 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
9901 	else
9902 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
9903 }
9904 
9905 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
9906 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
9907 {
9908 	uint8_t *edid_ext = NULL;
9909 	int i;
9910 	bool valid_vsdb_found = false;
9911 
9912 	/*----- drm_find_cea_extension() -----*/
9913 	/* No EDID or EDID extensions */
9914 	if (edid == NULL || edid->extensions == 0)
9915 		return -ENODEV;
9916 
9917 	/* Find CEA extension */
9918 	for (i = 0; i < edid->extensions; i++) {
9919 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
9920 		if (edid_ext[0] == CEA_EXT)
9921 			break;
9922 	}
9923 
9924 	if (i == edid->extensions)
9925 		return -ENODEV;
9926 
9927 	/*----- cea_db_offsets() -----*/
9928 	if (edid_ext[0] != CEA_EXT)
9929 		return -ENODEV;
9930 
9931 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
9932 
9933 	return valid_vsdb_found ? i : -ENODEV;
9934 }
9935 
9936 /**
9937  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
9938  *
9939  * @aconnector: Connector to query.
9940  *
9941  * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep
9942  * track of some of the display information in the internal data struct used by
9943  * amdgpu_dm. This function checks which type of connector we need to set the
9944  * FreeSync parameters.
9945  */
9946 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9947 				    struct edid *edid)
9948 {
9949 	int i = 0;
9950 	struct detailed_timing *timing;
9951 	struct detailed_non_pixel *data;
9952 	struct detailed_data_monitor_range *range;
9953 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9954 			to_amdgpu_dm_connector(connector);
9955 	struct dm_connector_state *dm_con_state = NULL;
9956 	struct dc_sink *sink;
9957 
9958 	struct drm_device *dev = connector->dev;
9959 	struct amdgpu_device *adev = drm_to_adev(dev);
9960 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
9961 	bool freesync_capable = false;
9962 
9963 	if (!connector->state) {
9964 		DRM_ERROR("%s - Connector has no state", __func__);
9965 		goto update;
9966 	}
9967 
9968 	sink = amdgpu_dm_connector->dc_sink ?
9969 		amdgpu_dm_connector->dc_sink :
9970 		amdgpu_dm_connector->dc_em_sink;
9971 
9972 	if (!edid || !sink) {
9973 		dm_con_state = to_dm_connector_state(connector->state);
9974 
9975 		amdgpu_dm_connector->min_vfreq = 0;
9976 		amdgpu_dm_connector->max_vfreq = 0;
9977 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9978 		connector->display_info.monitor_range.min_vfreq = 0;
9979 		connector->display_info.monitor_range.max_vfreq = 0;
9980 		freesync_capable = false;
9981 
9982 		goto update;
9983 	}
9984 
9985 	dm_con_state = to_dm_connector_state(connector->state);
9986 
9987 	if (!adev->dm.freesync_module)
9988 		goto update;
9989 
9990 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9991 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
9992 		bool edid_check_required = false;
9993 
9994 		if (edid) {
9995 			edid_check_required = is_dp_capable_without_timing_msa(
9996 						adev->dm.dc,
9997 						amdgpu_dm_connector);
9998 		}
9999 
10000 		if (edid_check_required == true && (edid->version > 1 ||
10001 		   (edid->version == 1 && edid->revision > 1))) {
10002 			for (i = 0; i < 4; i++) {
10003 
10004 				timing	= &edid->detailed_timings[i];
10005 				data	= &timing->data.other_data;
10006 				range	= &data->data.range;
10007 				/*
10008 				 * Check if monitor has continuous frequency mode
10009 				 */
10010 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10011 					continue;
10012 				/*
10013 				 * Check for flag range limits only. If flag == 1 then
10014 				 * no additional timing information provided.
10015 				 * Default GTF, GTF Secondary curve and CVT are not
10016 				 * supported
10017 				 */
10018 				if (range->flags != 1)
10019 					continue;
10020 
10021 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10022 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10023 				amdgpu_dm_connector->pixel_clock_mhz =
10024 					range->pixel_clock_mhz * 10;
10025 
10026 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10027 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10028 
10029 				break;
10030 			}
10031 
10032 			if (amdgpu_dm_connector->max_vfreq -
10033 			    amdgpu_dm_connector->min_vfreq > 10) {
10034 
10035 				freesync_capable = true;
10036 			}
10037 		}
10038 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10039 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10040 		if (i >= 0 && vsdb_info.freesync_supported) {
10041 			timing  = &edid->detailed_timings[i];
10042 			data    = &timing->data.other_data;
10043 
10044 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10045 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10046 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10047 				freesync_capable = true;
10048 
10049 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10050 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10051 		}
10052 	}
10053 
10054 update:
10055 	if (dm_con_state)
10056 		dm_con_state->freesync_capable = freesync_capable;
10057 
10058 	if (connector->vrr_capable_property)
10059 		drm_connector_set_vrr_capable_property(connector,
10060 						       freesync_capable);
10061 }
10062 
10063 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10064 {
10065 	struct amdgpu_device *adev = drm_to_adev(dev);
10066 	struct dc *dc = adev->dm.dc;
10067 	int i;
10068 
10069 	mutex_lock(&adev->dm.dc_lock);
10070 	if (dc->current_state) {
10071 		for (i = 0; i < dc->current_state->stream_count; ++i)
10072 			dc->current_state->streams[i]
10073 				->triggered_crtc_reset.enabled =
10074 				adev->dm.force_timing_sync;
10075 
10076 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10077 		dc_trigger_sync(dc, dc->current_state);
10078 	}
10079 	mutex_unlock(&adev->dm.dc_lock);
10080 }
10081 
/*
 * Register-write hook handed to DC.  Writes @value to @address via CGS and
 * records the access in the amdgpu_dc_wreg tracepoint.  @func_name identifies
 * the caller for debug builds.
 */
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	/* Catch writes through an uninitialized register offset. */
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
10094 
/*
 * Register-read hook handed to DC.  Returns the value at @address via CGS and
 * records the access in the amdgpu_dc_rreg tracepoint.  @func_name identifies
 * the caller for debug builds.
 */
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	/* Catch reads through an uninitialized register offset. */
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	/*
	 * While DMUB is gathering register writes for offload (and not burst
	 * writing), a CPU read would see stale hardware state — reject it.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}
10119 
10120 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
10121 						struct dc_context *ctx,
10122 						uint8_t status_type,
10123 						uint32_t *operation_result)
10124 {
10125 	struct amdgpu_device *adev = ctx->driver_context;
10126 	int return_status = -1;
10127 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
10128 
10129 	if (is_cmd_aux) {
10130 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10131 			return_status = p_notify->aux_reply.length;
10132 			*operation_result = p_notify->result;
10133 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
10134 			*operation_result = AUX_RET_ERROR_TIMEOUT;
10135 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
10136 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10137 		} else {
10138 			*operation_result = AUX_RET_ERROR_UNKNOWN;
10139 		}
10140 	} else {
10141 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
10142 			return_status = 0;
10143 			*operation_result = p_notify->sc_status;
10144 		} else {
10145 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
10146 		}
10147 	}
10148 
10149 	return return_status;
10150 }
10151 
/*
 * Issue a DMUB AUX or SET_CONFIG command and wait synchronously for the
 * firmware notification (delivered via dmub_aux_transfer_done by the
 * notification handler).
 *
 * @is_cmd_aux:       true for an AUX transfer, false for SET_CONFIG
 * @link_index:       DC link the command targets
 * @cmd_payload:      struct aux_payload * or struct set_config_cmd_payload *
 * @operation_result: receives the AUX/SET_CONFIG status code
 *
 * Returns the AUX reply length (AUX), 0 (SET_CONFIG), or -1 on failure,
 * mirroring amdgpu_dm_set_dmub_async_sync_status().
 */
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
	unsigned int link_index, void *cmd_payload, void *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	if (is_cmd_aux) {
		/*
		 * NOTE(review): the return value of
		 * dc_process_dmub_aux_transfer_async() is ignored; a failed
		 * submission would still wait for the 10 s timeout below —
		 * confirm this is intended.
		 */
		dc_process_dmub_aux_transfer_async(ctx->dc,
			link_index, (struct aux_payload *)cmd_payload);
	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
					(struct set_config_cmd_payload *)cmd_payload,
					adev->dm.dmub_notify)) {
		/* SET_CONFIG completed immediately; no wait needed. */
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
					(uint32_t *)operation_result);
	}

	/* Block until the DMUB notification handler signals completion. */
	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timeout!");
		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
				(uint32_t *)operation_result);
	}

	if (is_cmd_aux) {
		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
			struct aux_payload *payload = (struct aux_payload *)cmd_payload;

			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
			/* Copy read data back only on an ACKed read reply. */
			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
				       adev->dm.dmub_notify->aux_reply.length);
			}
		}
	}

	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
			(uint32_t *)operation_result);
}
10194 
10195 /*
10196  * Check whether seamless boot is supported.
10197  *
10198  * So far we only support seamless boot on CHIP_VANGOGH.
10199  * If everything goes well, we may consider expanding
10200  * seamless boot to other ASICs.
10201  */
10202 bool check_seamless_boot_capability(struct amdgpu_device *adev)
10203 {
10204 	switch (adev->asic_type) {
10205 	case CHIP_VANGOGH:
10206 		if (!adev->mman.keep_stolen_vga_memory)
10207 			return true;
10208 		break;
10209 	default:
10210 		break;
10211 	}
10212 
10213 	return false;
10214 }
10215