xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 3ecb3b794e2c1793443b72a968cb09d829c01a10)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
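
/*
 * An illustrative sketch (not actual driver code) of how an atomic
 * commit typically flows from DRM through dm into DC:
 *
 *	drm_atomic_commit()                        (DRM core)
 *	  amdgpu_dm_atomic_commit()                (dm entry point)
 *	    drm_atomic_helper_commit()             (DRM helper)
 *	      amdgpu_dm_atomic_commit_tail()       (dm, programs hardware)
 *	        dc_commit_state() /
 *	        dc_commit_updates_for_stream()     (DC)
 */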
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * Initializes drm_device display-related structures, based on the information
132  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
176 /**
177  * dm_vblank_get_counter() - Get vertical blank counter
178  * @adev: the amdgpu device to query
179  * @crtc: index of the CRTC to get the counter from
180  *
181  * Return: the vertical blank counter, or 0 if @crtc is out of range
182  * or has no stream attached.
183  */
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191 	if (crtc >= adev->mode_info.num_crtc)
192 		return 0;
193 	else {
194 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
196 				acrtc->base.state);
197 
199 		if (acrtc_state->stream == NULL) {
200 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
201 				  crtc);
202 			return 0;
203 		}
204 
205 		return dc_stream_get_vblank_counter(acrtc_state->stream);
206 	}
207 }
208 
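/**
 * dm_crtc_get_scanoutpos() - Get vblank range and current scanout position
 * @adev: the amdgpu device to query
 * @crtc: index of the CRTC to query
 * @vbl: packed vblank range, v_blank_start | (v_blank_end << 16)
 * @position: packed scanout position, v_position | (h_position << 16)
 *
 * A caller can unpack either value with, for example,
 * start = *vbl & 0xffff; end = *vbl >> 16;
 *
 * Return: 0 on success (including the missing-stream case), -EINVAL for
 * an invalid CRTC index.
 */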
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
221 		if (acrtc_state->stream == NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: interrupt parameters, used to look up the CRTC
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
325 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
326 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
339 	WARN_ON(!e);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
350 		/* Update to correct count and vblank timestamp if racing with
351 		 * vblank irq. This also updates to the correct vblank timestamp
352 		 * even in VRR mode, as scanout is past the front-porch atm.
353 		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
387 	/* Keep track of vblank of this flip for flip throttling. We use the
388 	 * cooked hw counter, as that one is incremented at the start of this vblank
389 	 * of pageflip completion, so last_flip_vblank is the forbidden count
390 	 * for queueing new pageflips if vsync + VRR is enabled.
391 	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
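/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * In VRR mode, core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch; BTR processing for pre-DCE12
 * ASICs is also done from here.
 */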
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
420 		/* Core vblank handling is done here after end of front-porch in
421 		 * vrr mode, as vblank timestamping will give valid results
422 		 * while now done after front-porch. This will also deliver
423 		 * page-flip completion events that have been queued to us
424 		 * if a pageflip happened inside front-porch.
425 		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
452  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/*
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/*
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
505 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
541 static int dm_early_init(void *handle);
542 
543 /* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
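	/*
	 * The allocation below is sized at 4 bytes per pixel of the
	 * largest listed mode (hence max_size * 4).
	 */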
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
573 		if (r) {
574 			DRM_ERROR("DM: Failed to initialize FBC\n");
575 		} else {
576 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577 			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
578 		}
579 
580 	}
581 
582 }
583 
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
705 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
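	/* Layout of the DMUB firmware image, as implied by the offsets
	 * computed above and below (illustrative sketch):
	 *
	 *   ucode_array_offset_bytes
	 *   +---------------------------+ ---
	 *   | PSP header (0x100 bytes)  |   |
	 *   +---------------------------+   | inst_const_bytes
	 *   | fw_inst_const (cw0)       |   |
	 *   +---------------------------+   |
	 *   | PSP footer (0x100 bytes)  |   |
	 *   +---------------------------+ ---
	 *   | fw_bss_data               |  bss_data_bytes
	 *   +---------------------------+
	 */
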
773 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
775 	 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
776 	 * is done here in dm_dmub_hw_init.
777 	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
792 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
865 	if (amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
932 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953 	if (!adev->dm.freesync_module) {
954 		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
955 	} else {
956 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
957 				 adev->dm.freesync_module);
958 	}
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual number of CRTCs in use */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
992 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
993 		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
995 		goto error;
996 	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++)
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028 	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
1042 	 * TODO: pageflip, vblank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
1068 	switch (adev->asic_type) {
1069 #if defined(CONFIG_DRM_AMD_DC_SI)
1070 	case CHIP_TAHITI:
1071 	case CHIP_PITCAIRN:
1072 	case CHIP_VERDE:
1073 	case CHIP_OLAND:
1074 #endif
1075 	case CHIP_BONAIRE:
1076 	case CHIP_HAWAII:
1077 	case CHIP_KAVERI:
1078 	case CHIP_KABINI:
1079 	case CHIP_MULLINS:
1080 	case CHIP_TONGA:
1081 	case CHIP_FIJI:
1082 	case CHIP_CARRIZO:
1083 	case CHIP_STONEY:
1084 	case CHIP_POLARIS11:
1085 	case CHIP_POLARIS10:
1086 	case CHIP_POLARIS12:
1087 	case CHIP_VEGAM:
1088 	case CHIP_VEGA10:
1089 	case CHIP_VEGA12:
1090 	case CHIP_VEGA20:
1091 	case CHIP_NAVI10:
1092 	case CHIP_NAVI14:
1093 	case CHIP_RENOIR:
1094 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1095 	case CHIP_SIENNA_CICHLID:
1096 	case CHIP_NAVY_FLOUNDER:
1097 #endif
1098 		return 0;
1099 	case CHIP_NAVI12:
1100 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1101 		break;
1102 	case CHIP_RAVEN:
1103 		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
1104 		    ASICREV_IS_RAVEN2(adev->external_rev_id))
1105 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1106 		else
1107 			return 0;
1109 		break;
1110 	default:
1111 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1112 		return -EINVAL;
1113 	}
1114 
1115 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1116 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1117 		return 0;
1118 	}
1119 
1120 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1121 	if (r == -ENOENT) {
1122 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1123 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1124 		adev->dm.fw_dmcu = NULL;
1125 		return 0;
1126 	}
1127 	if (r) {
1128 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1129 			fw_name_dmcu);
1130 		return r;
1131 	}
1132 
1133 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1134 	if (r) {
1135 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1136 			fw_name_dmcu);
1137 		release_firmware(adev->dm.fw_dmcu);
1138 		adev->dm.fw_dmcu = NULL;
1139 		return r;
1140 	}
1141 
1142 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1144 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1145 	adev->firmware.fw_size +=
1146 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1147 
1148 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1149 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1150 	adev->firmware.fw_size +=
1151 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1152 
1153 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1154 
1155 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1156 
1157 	return 0;
1158 }
1159 
1160 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1161 {
1162 	struct amdgpu_device *adev = ctx;
1163 
1164 	return dm_read_reg(adev->dm.dc->ctx, address);
1165 }
1166 
1167 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1168 				     uint32_t value)
1169 {
1170 	struct amdgpu_device *adev = ctx;
1171 
1172 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1173 }
1174 
1175 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1176 {
1177 	struct dmub_srv_create_params create_params;
1178 	struct dmub_srv_region_params region_params;
1179 	struct dmub_srv_region_info region_info;
1180 	struct dmub_srv_fb_params fb_params;
1181 	struct dmub_srv_fb_info *fb_info;
1182 	struct dmub_srv *dmub_srv;
1183 	const struct dmcub_firmware_header_v1_0 *hdr;
1184 	const char *fw_name_dmub;
1185 	enum dmub_asic dmub_asic;
1186 	enum dmub_status status;
1187 	int r;
1188 
1189 	switch (adev->asic_type) {
1190 	case CHIP_RENOIR:
1191 		dmub_asic = DMUB_ASIC_DCN21;
1192 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1193 		break;
1194 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1195 	case CHIP_SIENNA_CICHLID:
1196 		dmub_asic = DMUB_ASIC_DCN30;
1197 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1198 		break;
1199 	case CHIP_NAVY_FLOUNDER:
1200 		dmub_asic = DMUB_ASIC_DCN30;
1201 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1202 		break;
1203 #endif
1204 
1205 	default:
1206 		/* ASIC doesn't support DMUB. */
1207 		return 0;
1208 	}
1209 
1210 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1211 	if (r) {
1212 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
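		/* DMUB firmware is treated as optional here: initialization
		 * continues without it rather than propagating the error.
		 */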
1213 		return 0;
1214 	}
1215 
1216 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1217 	if (r) {
1218 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1219 		return 0;
1220 	}
1221 
1222 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1223 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1224 
1225 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1226 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1227 			AMDGPU_UCODE_ID_DMCUB;
1228 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1229 			adev->dm.dmub_fw;
1230 		adev->firmware.fw_size +=
1231 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1232 
1233 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1234 			 adev->dm.dmcub_fw_version);
1235 	}
1237 
1238 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1239 	dmub_srv = adev->dm.dmub_srv;
1240 
1241 	if (!dmub_srv) {
1242 		DRM_ERROR("Failed to allocate DMUB service!\n");
1243 		return -ENOMEM;
1244 	}
1245 
1246 	memset(&create_params, 0, sizeof(create_params));
1247 	create_params.user_ctx = adev;
1248 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1249 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1250 	create_params.asic = dmub_asic;
1251 
1252 	/* Create the DMUB service. */
1253 	status = dmub_srv_create(dmub_srv, &create_params);
1254 	if (status != DMUB_STATUS_OK) {
1255 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1256 		return -EINVAL;
1257 	}
1258 
1259 	/* Calculate the size of all the regions for the DMUB service. */
1260 	memset(&region_params, 0, sizeof(region_params));
1261 
1262 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1263 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1264 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1265 	region_params.vbios_size = adev->bios_size;
1266 	region_params.fw_bss_data = region_params.bss_data_size ?
1267 		adev->dm.dmub_fw->data +
1268 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1269 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1270 	region_params.fw_inst_const =
1271 		adev->dm.dmub_fw->data +
1272 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1273 		PSP_HEADER_BYTES;
1274 
1275 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1276 					   &region_info);
1277 
1278 	if (status != DMUB_STATUS_OK) {
1279 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1280 		return -EINVAL;
1281 	}
1282 
1283 	/*
1284 	 * Allocate a framebuffer based on the total size of all the regions.
1285 	 * TODO: Move this into GART.
1286 	 */
1287 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1288 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1289 				    &adev->dm.dmub_bo_gpu_addr,
1290 				    &adev->dm.dmub_bo_cpu_addr);
1291 	if (r)
1292 		return r;
1293 
1294 	/* Rebase the regions on the framebuffer address. */
1295 	memset(&fb_params, 0, sizeof(fb_params));
1296 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1297 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1298 	fb_params.region_info = &region_info;
1299 
1300 	adev->dm.dmub_fb_info =
1301 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1302 	fb_info = adev->dm.dmub_fb_info;
1303 
1304 	if (!fb_info) {
1305 		DRM_ERROR(
1306 			"Failed to allocate framebuffer info for DMUB service!\n");
1307 		return -ENOMEM;
1308 	}
1309 
1310 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1311 	if (status != DMUB_STATUS_OK) {
1312 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1313 		return -EINVAL;
1314 	}
1315 
1316 	return 0;
1317 }
1318 
1319 static int dm_sw_init(void *handle)
1320 {
1321 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322 	int r;
1323 
1324 	r = dm_dmub_sw_init(adev);
1325 	if (r)
1326 		return r;
1327 
1328 	return load_dmcu_fw(adev);
1329 }
1330 
1331 static int dm_sw_fini(void *handle)
1332 {
1333 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1334 
1335 	kfree(adev->dm.dmub_fb_info);
1336 	adev->dm.dmub_fb_info = NULL;
1337 
1338 	if (adev->dm.dmub_srv) {
1339 		dmub_srv_destroy(adev->dm.dmub_srv);
1340 		adev->dm.dmub_srv = NULL;
1341 	}
1342 
1343 	release_firmware(adev->dm.dmub_fw);
1344 	adev->dm.dmub_fw = NULL;
1345 
1346 	release_firmware(adev->dm.fw_dmcu);
1347 	adev->dm.fw_dmcu = NULL;
1348 
1349 	return 0;
1350 }
1351 
1352 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1353 {
1354 	struct amdgpu_dm_connector *aconnector;
1355 	struct drm_connector *connector;
1356 	struct drm_connector_list_iter iter;
1357 	int ret = 0;
1358 
1359 	drm_connector_list_iter_begin(dev, &iter);
1360 	drm_for_each_connector_iter(connector, &iter) {
1361 		aconnector = to_amdgpu_dm_connector(connector);
1362 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1363 		    aconnector->mst_mgr.aux) {
1364 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1365 					 aconnector,
1366 					 aconnector->base.base.id);
1367 
1368 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1369 			if (ret < 0) {
1370 				DRM_ERROR("DM_MST: Failed to start MST\n");
1371 				aconnector->dc_link->type =
1372 					dc_connection_single;
1373 				break;
1374 			}
1375 		}
1376 	}
1377 	drm_connector_list_iter_end(&iter);
1378 
1379 	return ret;
1380 }
1381 
1382 static int dm_late_init(void *handle)
1383 {
1384 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1385 
1386 	struct dmcu_iram_parameters params;
1387 	unsigned int linear_lut[16];
1388 	int i;
1389 	struct dmcu *dmcu = NULL;
1390 	bool ret = true;
1391 
1392 	if (!adev->dm.fw_dmcu)
1393 		return detect_mst_link_for_all_connectors(adev->ddev);
1394 
1395 	dmcu = adev->dm.dc->res_pool->dmcu;
1396 
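	/* Build a 16-point linear backlight LUT spanning 0..0xFFFF. */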
1397 	for (i = 0; i < 16; i++)
1398 		linear_lut[i] = 0xFFFF * i / 15;
1399 
1400 	params.set = 0;
1401 	params.backlight_ramping_start = 0xCCCC;
1402 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1403 	params.backlight_lut_array_size = 16;
1404 	params.backlight_lut_array = linear_lut;
1405 
1406 	/* Min backlight level after ABM reduction; don't allow below 1%
1407 	 * 0xFFFF x 0.01 = 0x28F
1408 	 */
1409 	params.min_abm_backlight = 0x28F;
1410 
1411 	/* In the case where abm is implemented on dmcub,
1412 	 * the dmcu object will be NULL.
1413 	 * ABM 2.4 and up are implemented on dmcub.
1414 	 */
1415 	if (dmcu)
1416 		ret = dmcu_load_iram(dmcu, params);
1417 	else if (adev->dm.dc->ctx->dmub_srv)
1418 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1419 
1420 	if (!ret)
1421 		return -EINVAL;
1422 
1423 	return detect_mst_link_for_all_connectors(adev->ddev);
1424 }
1425 
1426 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1427 {
1428 	struct amdgpu_dm_connector *aconnector;
1429 	struct drm_connector *connector;
1430 	struct drm_connector_list_iter iter;
1431 	struct drm_dp_mst_topology_mgr *mgr;
1432 	int ret;
1433 	bool need_hotplug = false;
1434 
1435 	drm_connector_list_iter_begin(dev, &iter);
1436 	drm_for_each_connector_iter(connector, &iter) {
1437 		aconnector = to_amdgpu_dm_connector(connector);
1438 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1439 		    aconnector->mst_port)
1440 			continue;
1441 
1442 		mgr = &aconnector->mst_mgr;
1443 
1444 		if (suspend) {
1445 			drm_dp_mst_topology_mgr_suspend(mgr);
1446 		} else {
1447 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1448 			if (ret < 0) {
1449 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1450 				need_hotplug = true;
1451 			}
1452 		}
1453 	}
1454 	drm_connector_list_iter_end(&iter);
1455 
1456 	if (need_hotplug)
1457 		drm_kms_helper_hotplug_event(dev);
1458 }
1459 
1460 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1461 {
1462 	struct smu_context *smu = &adev->smu;
1463 	int ret = 0;
1464 
1465 	if (!is_support_sw_smu(adev))
1466 		return 0;
1467 
1468 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1469 	 * depends on the Windows driver's dc implementation.
1470 	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
1471 	 * should be passed to smu during boot up and on resume from s3.
1472 	 * Boot up: dc calculates dcn watermark clock settings within dc_create,
1473 	 * dcn20_resource_construct,
1474 	 * then calls the pplib functions below to pass the settings to smu:
1475 	 * smu_set_watermarks_for_clock_ranges
1476 	 * smu_set_watermarks_table
1477 	 * navi10_set_watermarks_table
1478 	 * smu_write_watermarks_table
1479 	 *
1480 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
1481 	 * dc has implemented a different flow for the Windows driver:
1482 	 * dc_hardware_init / dc_set_power_state
1483 	 * dcn10_init_hw
1484 	 * notify_wm_ranges
1485 	 * set_wm_ranges
1486 	 * -- Linux
1487 	 * smu_set_watermarks_for_clock_ranges
1488 	 * renoir_set_watermarks_table
1489 	 * smu_write_watermarks_table
1490 	 *
1491 	 * For Linux,
1492 	 * dc_hardware_init -> amdgpu_dm_init
1493 	 * dc_set_power_state --> dm_resume
1494 	 *
1495 	 * Therefore, this function applies to navi10/12/14 but not Renoir.
1496 	 */
1498 	switch (adev->asic_type) {
1499 	case CHIP_NAVI10:
1500 	case CHIP_NAVI14:
1501 	case CHIP_NAVI12:
1502 		break;
1503 	default:
1504 		return 0;
1505 	}
1506 
1507 	ret = smu_write_watermarks_table(smu);
1508 	if (ret) {
1509 		DRM_ERROR("Failed to update WMTABLE!\n");
1510 		return ret;
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539 	/* Create DAL display manager */
1540 	amdgpu_dm_init(adev);
1541 	amdgpu_dm_hpd_init(adev);
1542 
1543 	return 0;
1544 }
1545 
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557 
1558 	amdgpu_dm_hpd_fini(adev);
1559 
1560 	amdgpu_dm_irq_fini(adev);
1561 	amdgpu_dm_fini(adev);
1562 	return 0;
1563 }
1564 
1565 
1566 static int dm_enable_vblank(struct drm_crtc *crtc);
1567 static void dm_disable_vblank(struct drm_crtc *crtc);
1568 
1569 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1570 				 struct dc_state *state, bool enable)
1571 {
1572 	enum dc_irq_source irq_source;
1573 	struct amdgpu_crtc *acrtc;
1574 	int rc = -EBUSY;
1575 	int i = 0;
1576 
1577 	for (i = 0; i < state->stream_count; i++) {
1578 		acrtc = get_crtc_by_otg_inst(
1579 				adev, state->stream_status[i].primary_otg_inst);
1580 
1581 		if (acrtc && state->stream_status[i].plane_count != 0) {
1582 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1583 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1584 			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1585 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1586 			if (rc)
1587 				DRM_WARN("Failed to %s pflip interrupts\n",
1588 					 enable ? "enable" : "disable");
1589 
1590 			if (enable) {
1591 				rc = dm_enable_vblank(&acrtc->base);
1592 				if (rc)
1593 					DRM_WARN("Failed to enable vblank interrupts\n");
1594 			} else {
1595 				dm_disable_vblank(&acrtc->base);
1596 			}
1597 
1598 		}
1599 	}
1601 }
1602 
1603 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1604 {
1605 	struct dc_state *context = NULL;
1606 	enum dc_status res = DC_ERROR_UNEXPECTED;
1607 	int i;
1608 	struct dc_stream_state *del_streams[MAX_PIPES];
1609 	int del_streams_count = 0;
1610 
1611 	memset(del_streams, 0, sizeof(del_streams));
1612 
1613 	context = dc_create_state(dc);
1614 	if (context == NULL)
1615 		goto context_alloc_fail;
1616 
1617 	dc_resource_state_copy_construct_current(dc, context);
1618 
1619 	/* First remove from context all streams */
1620 	for (i = 0; i < context->stream_count; i++) {
1621 		struct dc_stream_state *stream = context->streams[i];
1622 
1623 		del_streams[del_streams_count++] = stream;
1624 	}
1625 
1626 	/* Remove all planes for removed streams and then remove the streams */
1627 	for (i = 0; i < del_streams_count; i++) {
1628 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1629 			res = DC_FAIL_DETACH_SURFACES;
1630 			goto fail;
1631 		}
1632 
1633 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1634 		if (res != DC_OK)
1635 			goto fail;
1636 	}
1637 
1639 	res = dc_validate_global_state(dc, context, false);
1640 
1641 	if (res != DC_OK) {
1642 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1643 		goto fail;
1644 	}
1645 
1646 	res = dc_commit_state(dc, context);
1647 
1648 fail:
1649 	dc_release_state(context);
1650 
1651 context_alloc_fail:
1652 	return res;
1653 }
1654 
1655 static int dm_suspend(void *handle)
1656 {
1657 	struct amdgpu_device *adev = handle;
1658 	struct amdgpu_display_manager *dm = &adev->dm;
1659 	int ret = 0;
1660 
1661 	if (amdgpu_in_reset(adev)) {
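		/*
		 * Note: dc_lock is deliberately left held across the GPU
		 * reset; dm_resume() releases it once the cached state has
		 * been restored.
		 */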
1662 		mutex_lock(&dm->dc_lock);
1663 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1664 
1665 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1666 
1667 		amdgpu_dm_commit_zero_streams(dm->dc);
1668 
1669 		amdgpu_dm_irq_suspend(adev);
1670 
1671 		return ret;
1672 	}
1673 
1674 	WARN_ON(adev->dm.cached_state);
1675 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1676 
1677 	s3_handle_mst(adev->ddev, true);
1678 
1679 	amdgpu_dm_irq_suspend(adev);
1680 
1682 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1683 
1684 	return 0;
1685 }
1686 
1687 static struct amdgpu_dm_connector *
1688 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1689 					     struct drm_crtc *crtc)
1690 {
1691 	uint32_t i;
1692 	struct drm_connector_state *new_con_state;
1693 	struct drm_connector *connector;
1694 	struct drm_crtc *crtc_from_state;
1695 
1696 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1697 		crtc_from_state = new_con_state->crtc;
1698 
1699 		if (crtc_from_state == crtc)
1700 			return to_amdgpu_dm_connector(connector);
1701 	}
1702 
1703 	return NULL;
1704 }
1705 
1706 static void emulated_link_detect(struct dc_link *link)
1707 {
1708 	struct dc_sink_init_data sink_init_data = { 0 };
1709 	struct display_sink_capability sink_caps = { 0 };
1710 	enum dc_edid_status edid_status;
1711 	struct dc_context *dc_ctx = link->ctx;
1712 	struct dc_sink *sink = NULL;
1713 	struct dc_sink *prev_sink = NULL;
1714 
1715 	link->type = dc_connection_none;
1716 	prev_sink = link->local_sink;
1717 
1718 	if (prev_sink != NULL)
1719 		dc_sink_retain(prev_sink);
1720 
1721 	switch (link->connector_signal) {
1722 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_LVDS: {
1741 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1742 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1743 		break;
1744 	}
1745 
1746 	case SIGNAL_TYPE_EDP: {
1747 		sink_caps.transaction_type =
1748 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1749 		sink_caps.signal = SIGNAL_TYPE_EDP;
1750 		break;
1751 	}
1752 
1753 	case SIGNAL_TYPE_DISPLAY_PORT: {
1754 		sink_caps.transaction_type =
1755 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
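		/* Note: DP maps to SIGNAL_TYPE_VIRTUAL here, consistent with
		 * this path emulating detection without a real sink attached.
		 */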
1756 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1757 		break;
1758 	}
1759 
1760 	default:
1761 		DC_ERROR("Invalid connector type! signal:%d\n",
1762 			link->connector_signal);
1763 		return;
1764 	}
1765 
1766 	sink_init_data.link = link;
1767 	sink_init_data.sink_signal = sink_caps.signal;
1768 
1769 	sink = dc_sink_create(&sink_init_data);
1770 	if (!sink) {
1771 		DC_ERROR("Failed to create sink!\n");
1772 		return;
1773 	}
1774 
1775 	/* dc_sink_create returns a new reference */
1776 	link->local_sink = sink;
1777 
1778 	edid_status = dm_helpers_read_local_edid(
1779 			link->ctx,
1780 			link,
1781 			sink);
1782 
1783 	if (edid_status != EDID_OK)
1784 		DC_ERROR("Failed to read EDID\n");
1785 
1786 }
1787 
1788 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1789 				     struct amdgpu_display_manager *dm)
1790 {
1791 	struct {
1792 		struct dc_surface_update surface_updates[MAX_SURFACES];
1793 		struct dc_plane_info plane_infos[MAX_SURFACES];
1794 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1795 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1796 		struct dc_stream_update stream_update;
1797 	} *bundle;
1798 	int k, m;
1799 
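	/*
	 * The update bundle is heap-allocated to avoid putting a large
	 * (MAX_SURFACES-sized) structure on the kernel stack.
	 */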
1800 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1801 
1802 	if (!bundle) {
1803 		dm_error("Failed to allocate update bundle\n");
1804 		goto cleanup;
1805 	}
1806 
1807 	for (k = 0; k < dc_state->stream_count; k++) {
1808 		bundle->stream_update.stream = dc_state->streams[k];
1809 
1810 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1811 			bundle->surface_updates[m].surface =
1812 				dc_state->stream_status[k].plane_states[m];
1813 			bundle->surface_updates[m].surface->force_full_update =
1814 				true;
1815 		}
1816 		dc_commit_updates_for_stream(
1817 			dm->dc, bundle->surface_updates,
1818 			dc_state->stream_status[k].plane_count,
1819 			dc_state->streams[k], &bundle->stream_update, dc_state);
1820 	}
1821 
1822 cleanup:
1823 	kfree(bundle);
1824 
1825 	return;
1826 }
1827 
1828 static int dm_resume(void *handle)
1829 {
1830 	struct amdgpu_device *adev = handle;
1831 	struct drm_device *ddev = adev->ddev;
1832 	struct amdgpu_display_manager *dm = &adev->dm;
1833 	struct amdgpu_dm_connector *aconnector;
1834 	struct drm_connector *connector;
1835 	struct drm_connector_list_iter iter;
1836 	struct drm_crtc *crtc;
1837 	struct drm_crtc_state *new_crtc_state;
1838 	struct dm_crtc_state *dm_new_crtc_state;
1839 	struct drm_plane *plane;
1840 	struct drm_plane_state *new_plane_state;
1841 	struct dm_plane_state *dm_new_plane_state;
1842 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1843 	enum dc_connection_type new_connection_type = dc_connection_none;
1844 	struct dc_state *dc_state;
1845 	int i, r, j;
1846 
1847 	if (amdgpu_in_reset(adev)) {
1848 		dc_state = dm->cached_dc_state;
1849 
1850 		r = dm_dmub_hw_init(adev);
1851 		if (r)
1852 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1853 
1854 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1855 		dc_resume(dm->dc);
1856 
1857 		amdgpu_dm_irq_resume_early(adev);
1858 
1859 		for (i = 0; i < dc_state->stream_count; i++) {
1860 			dc_state->streams[i]->mode_changed = true;
1861 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1862 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1863 					= 0xffffffff;
1864 			}
1865 		}
1866 
1867 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1868 
1869 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1870 
1871 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1872 
1873 		dc_release_state(dm->cached_dc_state);
1874 		dm->cached_dc_state = NULL;
1875 
1876 		amdgpu_dm_irq_resume_late(adev);
1877 
1878 		mutex_unlock(&dm->dc_lock);
1879 
1880 		return 0;
1881 	}
1882 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1883 	dc_release_state(dm_state->context);
1884 	dm_state->context = dc_create_state(dm->dc);
1885 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1886 	dc_resource_state_construct(dm->dc, dm_state->context);
1887 
1888 	/* Before powering on DC we need to re-initialize DMUB. */
1889 	r = dm_dmub_hw_init(adev);
1890 	if (r)
1891 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1892 
1893 	/* power on hardware */
1894 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1895 
1896 	/* program HPD filter */
1897 	dc_resume(dm->dc);
1898 
1899 	/*
1900 	 * early enable HPD Rx IRQ, should be done before set mode as short
1901 	 * pulse interrupts are used for MST
1902 	 */
1903 	amdgpu_dm_irq_resume_early(adev);
1904 
1905 	/* On resume we need to rewrite the MSTM control bits to enable MST */
1906 	s3_handle_mst(ddev, false);
1907 
1908 	/* Do detection */
1909 	drm_connector_list_iter_begin(ddev, &iter);
1910 	drm_for_each_connector_iter(connector, &iter) {
1911 		aconnector = to_amdgpu_dm_connector(connector);
1912 
1913 		/*
1914 		 * Skip connectors that were already created for MST ports;
1915 		 * the MST framework manages them.
1916 		 */
1917 		if (aconnector->mst_port)
1918 			continue;
1919 
1920 		mutex_lock(&aconnector->hpd_lock);
1921 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1922 			DRM_ERROR("KMS: Failed to detect connector\n");
1923 
1924 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1925 			emulated_link_detect(aconnector->dc_link);
1926 		else
1927 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1928 
1929 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1930 			aconnector->fake_enable = false;
1931 
1932 		if (aconnector->dc_sink)
1933 			dc_sink_release(aconnector->dc_sink);
1934 		aconnector->dc_sink = NULL;
1935 		amdgpu_dm_update_connector_after_detect(aconnector);
1936 		mutex_unlock(&aconnector->hpd_lock);
1937 	}
1938 	drm_connector_list_iter_end(&iter);
1939 
1940 	/* Force mode set in atomic commit */
1941 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1942 		new_crtc_state->active_changed = true;
1943 
1944 	/*
1945 	 * atomic_check is expected to create the dc states. We need to release
1946 	 * them here, since they were duplicated as part of the suspend
1947 	 * procedure.
1948 	 */
1949 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1950 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1951 		if (dm_new_crtc_state->stream) {
1952 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1953 			dc_stream_release(dm_new_crtc_state->stream);
1954 			dm_new_crtc_state->stream = NULL;
1955 		}
1956 	}
1957 
1958 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1959 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1960 		if (dm_new_plane_state->dc_state) {
1961 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1962 			dc_plane_state_release(dm_new_plane_state->dc_state);
1963 			dm_new_plane_state->dc_state = NULL;
1964 		}
1965 	}
1966 
1967 	drm_atomic_helper_resume(ddev, dm->cached_state);
1968 
1969 	dm->cached_state = NULL;
1970 
1971 	amdgpu_dm_irq_resume_late(adev);
1972 
1973 	amdgpu_dm_smu_write_watermarks_table(adev);
1974 
1975 	return 0;
1976 }
1977 
1978 /**
1979  * DOC: DM Lifecycle
1980  *
1981  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1982  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1983  * the base driver's device list to be initialized and torn down accordingly.
1984  *
1985  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1986  */
1987 
1988 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1989 	.name = "dm",
1990 	.early_init = dm_early_init,
1991 	.late_init = dm_late_init,
1992 	.sw_init = dm_sw_init,
1993 	.sw_fini = dm_sw_fini,
1994 	.hw_init = dm_hw_init,
1995 	.hw_fini = dm_hw_fini,
1996 	.suspend = dm_suspend,
1997 	.resume = dm_resume,
1998 	.is_idle = dm_is_idle,
1999 	.wait_for_idle = dm_wait_for_idle,
2000 	.check_soft_reset = dm_check_soft_reset,
2001 	.soft_reset = dm_soft_reset,
2002 	.set_clockgating_state = dm_set_clockgating_state,
2003 	.set_powergating_state = dm_set_powergating_state,
2004 };
2005 
2006 const struct amdgpu_ip_block_version dm_ip_block =
2007 {
2008 	.type = AMD_IP_BLOCK_TYPE_DCE,
2009 	.major = 1,
2010 	.minor = 0,
2011 	.rev = 0,
2012 	.funcs = &amdgpu_dm_funcs,
2013 };
2014 
2015 
2016 /**
2017  * DOC: atomic
2018  *
2019  * *WIP*
2020  */
2021 
2022 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2023 	.fb_create = amdgpu_display_user_framebuffer_create,
2024 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2025 	.atomic_check = amdgpu_dm_atomic_check,
2026 	.atomic_commit = amdgpu_dm_atomic_commit,
2027 };
2028 
2029 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2030 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2031 };
2032 
2033 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2034 {
2035 	u32 max_cll, min_cll, max, min, q, r;
2036 	struct amdgpu_dm_backlight_caps *caps;
2037 	struct amdgpu_display_manager *dm;
2038 	struct drm_connector *conn_base;
2039 	struct amdgpu_device *adev;
2040 	struct dc_link *link = NULL;
2041 	static const u8 pre_computed_values[] = {
2042 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2043 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2044 
2045 	if (!aconnector || !aconnector->dc_link)
2046 		return;
2047 
2048 	link = aconnector->dc_link;
2049 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2050 		return;
2051 
2052 	conn_base = &aconnector->base;
2053 	adev = conn_base->dev->dev_private;
2054 	dm = &adev->dm;
2055 	caps = &dm->backlight_caps;
2056 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2057 	caps->aux_support = false;
2058 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2059 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2060 
2061 	if (caps->ext_caps->bits.oled == 1 ||
2062 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2063 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2064 		caps->aux_support = true;
2065 
2066 	/* From the specification (CTA-861-G), the maximum luminance is
2067 	 * calculated as:
2068 	 *	Luminance = 50*2**(CV/32)
2069 	 * where CV is a one-byte value.
2070 	 * Evaluating this expression directly would require floating-point
2071 	 * precision; to avoid that complexity, we exploit the fact that CV
2072 	 * is divided by a constant. By Euclid's division algorithm, CV can
2073 	 * be written as CV = 32*q + r. Substituting into the luminance
2074 	 * expression gives 50*(2**q)*(2**(r/32)), so only the values of
2075 	 * 50*2**(r/32) for r in 0..31 need to be pre-computed. They were
2076 	 * generated with the following Ruby line:
2077 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2078 	 * and the results can be verified against pre_computed_values
2079 	 * above.
2080 	 */
2081 	q = max_cll >> 5;
2082 	r = max_cll % 32;
2083 	max = (1 << q) * pre_computed_values[r];
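	/*
	 * Worked example (illustrative value, not from the spec): for
	 * max_cll = 70, q = 70 >> 5 = 2 and r = 70 % 32 = 6, giving
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 nits,
	 * close to the exact 50*2**(70/32) ~= 227.8.
	 */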
2084 
2085 	/* min luminance: maxLum * (CV/255)^2 / 100, computed in fixed point
2086 	 * so the (CV/255)^2 / 100 term is not rounded down to zero. */
2087 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2088 
2089 	caps->aux_max_input_signal = max;
2090 	caps->aux_min_input_signal = min;
2091 }
2092 
2093 void amdgpu_dm_update_connector_after_detect(
2094 		struct amdgpu_dm_connector *aconnector)
2095 {
2096 	struct drm_connector *connector = &aconnector->base;
2097 	struct drm_device *dev = connector->dev;
2098 	struct dc_sink *sink;
2099 
2100 	/* MST handled by drm_mst framework */
2101 	if (aconnector->mst_mgr.mst_state)
2102 		return;
2103 
2104 
2105 	sink = aconnector->dc_link->local_sink;
2106 	if (sink)
2107 		dc_sink_retain(sink);
2108 
2109 	/*
2110 	 * An EDID-managed connector gets its first update only in mode_valid;
2111 	 * the connector sink is then set to either the fake or the physical
2112 	 * sink, depending on link status. Skip if already done during boot.
2113 	 */
2114 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2115 			&& aconnector->dc_em_sink) {
2116 
2117 		/*
2118 		 * For headless S3 resume, use dc_em_sink to fake a stream,
2119 		 * because connector->sink is set to NULL on resume.
2120 		 */
2121 		mutex_lock(&dev->mode_config.mutex);
2122 
2123 		if (sink) {
2124 			if (aconnector->dc_sink) {
2125 				amdgpu_dm_update_freesync_caps(connector, NULL);
2126 				/*
2127 				 * Retain/release below bump the sink refcount:
2128 				 * after disconnect the link no longer points
2129 				 * to it, so a later crtc-to-connector reshuffle
2130 				 * by the UMD would otherwise release dc_sink.
2131 				 */
2132 				dc_sink_release(aconnector->dc_sink);
2133 			}
2134 			aconnector->dc_sink = sink;
2135 			dc_sink_retain(aconnector->dc_sink);
2136 			amdgpu_dm_update_freesync_caps(connector,
2137 					aconnector->edid);
2138 		} else {
2139 			amdgpu_dm_update_freesync_caps(connector, NULL);
2140 			if (!aconnector->dc_sink) {
2141 				aconnector->dc_sink = aconnector->dc_em_sink;
2142 				dc_sink_retain(aconnector->dc_sink);
2143 			}
2144 		}
2145 
2146 		mutex_unlock(&dev->mode_config.mutex);
2147 
2148 		if (sink)
2149 			dc_sink_release(sink);
2150 		return;
2151 	}
2152 
2153 	/*
2154 	 * TODO: temporary guard until a proper fix is found.
2155 	 * If this sink is an MST sink, we should not do anything.
2156 	 */
2157 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2158 		dc_sink_release(sink);
2159 		return;
2160 	}
2161 
2162 	if (aconnector->dc_sink == sink) {
2163 		/*
2164 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2165 		 * Do nothing!!
2166 		 */
2167 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2168 				aconnector->connector_id);
2169 		if (sink)
2170 			dc_sink_release(sink);
2171 		return;
2172 	}
2173 
2174 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2175 		aconnector->connector_id, aconnector->dc_sink, sink);
2176 
2177 	mutex_lock(&dev->mode_config.mutex);
2178 
2179 	/*
2180 	 * 1. Update status of the drm connector
2181 	 * 2. Send an event and let userspace tell us what to do
2182 	 */
2183 	if (sink) {
2184 		/*
2185 		 * TODO: check if we still need the S3 mode update workaround.
2186 		 * If yes, put it here.
2187 		 */
2188 		if (aconnector->dc_sink)
2189 			amdgpu_dm_update_freesync_caps(connector, NULL);
2190 
2191 		aconnector->dc_sink = sink;
2192 		dc_sink_retain(aconnector->dc_sink);
2193 		if (sink->dc_edid.length == 0) {
2194 			aconnector->edid = NULL;
2195 			if (aconnector->dc_link->aux_mode) {
2196 				drm_dp_cec_unset_edid(
2197 					&aconnector->dm_dp_aux.aux);
2198 			}
2199 		} else {
2200 			aconnector->edid =
2201 				(struct edid *)sink->dc_edid.raw_edid;
2202 
2203 			drm_connector_update_edid_property(connector,
2204 							   aconnector->edid);
2205 
2206 			if (aconnector->dc_link->aux_mode)
2207 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2208 						    aconnector->edid);
2209 		}
2210 
2211 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2212 		update_connector_ext_caps(aconnector);
2213 	} else {
2214 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2215 		amdgpu_dm_update_freesync_caps(connector, NULL);
2216 		drm_connector_update_edid_property(connector, NULL);
2217 		aconnector->num_modes = 0;
2218 		dc_sink_release(aconnector->dc_sink);
2219 		aconnector->dc_sink = NULL;
2220 		aconnector->edid = NULL;
2221 #ifdef CONFIG_DRM_AMD_DC_HDCP
2222 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2223 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2224 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2225 #endif
2226 	}
2227 
2228 	mutex_unlock(&dev->mode_config.mutex);
2229 
2230 	if (sink)
2231 		dc_sink_release(sink);
2232 }
2233 
2234 static void handle_hpd_irq(void *param)
2235 {
2236 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2237 	struct drm_connector *connector = &aconnector->base;
2238 	struct drm_device *dev = connector->dev;
2239 	enum dc_connection_type new_connection_type = dc_connection_none;
2240 #ifdef CONFIG_DRM_AMD_DC_HDCP
2241 	struct amdgpu_device *adev = dev->dev_private;
2242 #endif
2243 
2244 	/*
2245 	 * On failure, or for MST, there is no need to update the connector
2246 	 * status or notify the OS, since MST does this in its own context.
2247 	 */
2248 	mutex_lock(&aconnector->hpd_lock);
2249 
2250 #ifdef CONFIG_DRM_AMD_DC_HDCP
2251 	if (adev->dm.hdcp_workqueue)
2252 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2253 #endif
2254 	if (aconnector->fake_enable)
2255 		aconnector->fake_enable = false;
2256 
2257 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2258 		DRM_ERROR("KMS: Failed to detect connector\n");
2259 
2260 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2261 		emulated_link_detect(aconnector->dc_link);
2262 
2263 
2264 		drm_modeset_lock_all(dev);
2265 		dm_restore_drm_connector_state(dev, connector);
2266 		drm_modeset_unlock_all(dev);
2267 
2268 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2269 			drm_kms_helper_hotplug_event(dev);
2270 
2271 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2272 		amdgpu_dm_update_connector_after_detect(aconnector);
2273 
2274 
2275 		drm_modeset_lock_all(dev);
2276 		dm_restore_drm_connector_state(dev, connector);
2277 		drm_modeset_unlock_all(dev);
2278 
2279 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2280 			drm_kms_helper_hotplug_event(dev);
2281 	}
2282 	mutex_unlock(&aconnector->hpd_lock);
2283 
2284 }
2285 
2286 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2287 {
2288 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2289 	uint8_t dret;
2290 	bool new_irq_handled = false;
2291 	int dpcd_addr;
2292 	int dpcd_bytes_to_read;
2293 
2294 	const int max_process_count = 30;
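	/*
	 * Bound the ESI servicing loop below so that a sink which keeps
	 * raising new IRQs cannot wedge this handler indefinitely.
	 */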
2295 	int process_count = 0;
2296 
2297 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2298 
2299 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2300 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2301 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2302 		dpcd_addr = DP_SINK_COUNT;
2303 	} else {
2304 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2305 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2306 		dpcd_addr = DP_SINK_COUNT_ESI;
2307 	}
2308 
2309 	dret = drm_dp_dpcd_read(
2310 		&aconnector->dm_dp_aux.aux,
2311 		dpcd_addr,
2312 		esi,
2313 		dpcd_bytes_to_read);
2314 
2315 	while (dret == dpcd_bytes_to_read &&
2316 		process_count < max_process_count) {
2317 		uint8_t retry;
2318 		dret = 0;
2319 
2320 		process_count++;
2321 
2322 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2323 		/* handle HPD short pulse irq */
2324 		if (aconnector->mst_mgr.mst_state)
2325 			drm_dp_mst_hpd_irq(
2326 				&aconnector->mst_mgr,
2327 				esi,
2328 				&new_irq_handled);
2329 
2330 		if (new_irq_handled) {
2331 			/* ACK at DPCD to notify downstream */
2332 			const int ack_dpcd_bytes_to_write =
2333 				dpcd_bytes_to_read - 1;
2334 
2335 			for (retry = 0; retry < 3; retry++) {
2336 				uint8_t wret;
2337 
2338 				wret = drm_dp_dpcd_write(
2339 					&aconnector->dm_dp_aux.aux,
2340 					dpcd_addr + 1,
2341 					&esi[1],
2342 					ack_dpcd_bytes_to_write);
2343 				if (wret == ack_dpcd_bytes_to_write)
2344 					break;
2345 			}
2346 
2347 			/* check if there is new irq to be handled */
2348 			dret = drm_dp_dpcd_read(
2349 				&aconnector->dm_dp_aux.aux,
2350 				dpcd_addr,
2351 				esi,
2352 				dpcd_bytes_to_read);
2353 
2354 			new_irq_handled = false;
2355 		} else {
2356 			break;
2357 		}
2358 	}
2359 
2360 	if (process_count == max_process_count)
2361 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2362 }
2363 
2364 static void handle_hpd_rx_irq(void *param)
2365 {
2366 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2367 	struct drm_connector *connector = &aconnector->base;
2368 	struct drm_device *dev = connector->dev;
2369 	struct dc_link *dc_link = aconnector->dc_link;
2370 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2371 	enum dc_connection_type new_connection_type = dc_connection_none;
2372 #ifdef CONFIG_DRM_AMD_DC_HDCP
2373 	union hpd_irq_data hpd_irq_data;
2374 	struct amdgpu_device *adev = dev->dev_private;
2375 
2376 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2377 #endif
2378 
2379 	/*
2380 	 * TODO: Temporarily take a mutex so the hpd interrupt does not hit
2381 	 * a gpio conflict; once the i2c helper is implemented, this mutex
2382 	 * should be retired.
2383 	 */
2384 	if (dc_link->type != dc_connection_mst_branch)
2385 		mutex_lock(&aconnector->hpd_lock);
2386 
2387 
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2390 #else
2391 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2392 #endif
2393 			!is_mst_root_connector) {
2394 		/* Downstream Port status changed. */
2395 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2396 			DRM_ERROR("KMS: Failed to detect connector\n");
2397 
2398 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2399 			emulated_link_detect(dc_link);
2400 
2401 			if (aconnector->fake_enable)
2402 				aconnector->fake_enable = false;
2403 
2404 			amdgpu_dm_update_connector_after_detect(aconnector);
2405 
2406 
2407 			drm_modeset_lock_all(dev);
2408 			dm_restore_drm_connector_state(dev, connector);
2409 			drm_modeset_unlock_all(dev);
2410 
2411 			drm_kms_helper_hotplug_event(dev);
2412 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2413 
2414 			if (aconnector->fake_enable)
2415 				aconnector->fake_enable = false;
2416 
2417 			amdgpu_dm_update_connector_after_detect(aconnector);
2418 
2419 
2420 			drm_modeset_lock_all(dev);
2421 			dm_restore_drm_connector_state(dev, connector);
2422 			drm_modeset_unlock_all(dev);
2423 
2424 			drm_kms_helper_hotplug_event(dev);
2425 		}
2426 	}
2427 #ifdef CONFIG_DRM_AMD_DC_HDCP
2428 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2429 		if (adev->dm.hdcp_workqueue)
2430 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2431 	}
2432 #endif
2433 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2434 	    (dc_link->type == dc_connection_mst_branch))
2435 		dm_handle_hpd_rx_irq(aconnector);
2436 
2437 	if (dc_link->type != dc_connection_mst_branch) {
2438 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2439 		mutex_unlock(&aconnector->hpd_lock);
2440 	}
2441 }
2442 
2443 static void register_hpd_handlers(struct amdgpu_device *adev)
2444 {
2445 	struct drm_device *dev = adev->ddev;
2446 	struct drm_connector *connector;
2447 	struct amdgpu_dm_connector *aconnector;
2448 	const struct dc_link *dc_link;
2449 	struct dc_interrupt_params int_params = {0};
2450 
2451 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2452 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2453 
2454 	list_for_each_entry(connector,
2455 			&dev->mode_config.connector_list, head) {
2456 
2457 		aconnector = to_amdgpu_dm_connector(connector);
2458 		dc_link = aconnector->dc_link;
2459 
2460 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2461 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2462 			int_params.irq_source = dc_link->irq_source_hpd;
2463 
2464 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2465 					handle_hpd_irq,
2466 					(void *) aconnector);
2467 		}
2468 
2469 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2470 
2471 			/* Also register for DP short pulse (hpd_rx). */
2472 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2473 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2474 
2475 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2476 					handle_hpd_rx_irq,
2477 					(void *) aconnector);
2478 		}
2479 	}
2480 }
2481 
2482 #if defined(CONFIG_DRM_AMD_DC_SI)
2483 /* Register IRQ sources and initialize IRQ callbacks */
2484 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2485 {
2486 	struct dc *dc = adev->dm.dc;
2487 	struct common_irq_params *c_irq_params;
2488 	struct dc_interrupt_params int_params = {0};
2489 	int r;
2490 	int i;
2491 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2492 
2493 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2494 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2495 
2496 	/*
2497 	 * Actions of amdgpu_irq_add_id():
2498 	 * 1. Register a set() function with base driver.
2499 	 *    Base driver will call set() function to enable/disable an
2500 	 *    interrupt in DC hardware.
2501 	 * 2. Register amdgpu_dm_irq_handler().
2502 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2503 	 *    coming from DC hardware.
2504 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2505 	 *    for acknowledging and handling. */
2506 
2507 	/* Use VBLANK interrupt */
2508 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2509 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2510 		if (r) {
2511 			DRM_ERROR("Failed to add crtc irq id!\n");
2512 			return r;
2513 		}
2514 
2515 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2516 		int_params.irq_source =
2517 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2518 
2519 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2520 
2521 		c_irq_params->adev = adev;
2522 		c_irq_params->irq_src = int_params.irq_source;
2523 
2524 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2525 				dm_crtc_high_irq, c_irq_params);
2526 	}
2527 
2528 	/* Use GRPH_PFLIP interrupt */
2529 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2530 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2531 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2532 		if (r) {
2533 			DRM_ERROR("Failed to add page flip irq id!\n");
2534 			return r;
2535 		}
2536 
2537 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2538 		int_params.irq_source =
2539 			dc_interrupt_to_irq_source(dc, i, 0);
2540 
2541 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2542 
2543 		c_irq_params->adev = adev;
2544 		c_irq_params->irq_src = int_params.irq_source;
2545 
2546 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2547 				dm_pflip_high_irq, c_irq_params);
2548 
2549 	}
2550 
2551 	/* HPD */
2552 	r = amdgpu_irq_add_id(adev, client_id,
2553 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2554 	if (r) {
2555 		DRM_ERROR("Failed to add hpd irq id!\n");
2556 		return r;
2557 	}
2558 
2559 	register_hpd_handlers(adev);
2560 
2561 	return 0;
2562 }
2563 #endif
2564 
2565 /* Register IRQ sources and initialize IRQ callbacks */
2566 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2567 {
2568 	struct dc *dc = adev->dm.dc;
2569 	struct common_irq_params *c_irq_params;
2570 	struct dc_interrupt_params int_params = {0};
2571 	int r;
2572 	int i;
2573 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2574 
2575 	if (adev->asic_type >= CHIP_VEGA10)
2576 		client_id = SOC15_IH_CLIENTID_DCE;
2577 
2578 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2579 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2580 
2581 	/*
2582 	 * Actions of amdgpu_irq_add_id():
2583 	 * 1. Register a set() function with base driver.
2584 	 *    Base driver will call set() function to enable/disable an
2585 	 *    interrupt in DC hardware.
2586 	 * 2. Register amdgpu_dm_irq_handler().
2587 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2588 	 *    coming from DC hardware.
2589 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2590 	 *    for acknowledging and handling. */
2591 
2592 	/* Use VBLANK interrupt */
2593 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2594 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2595 		if (r) {
2596 			DRM_ERROR("Failed to add crtc irq id!\n");
2597 			return r;
2598 		}
2599 
2600 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2601 		int_params.irq_source =
2602 			dc_interrupt_to_irq_source(dc, i, 0);
2603 
2604 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2605 
2606 		c_irq_params->adev = adev;
2607 		c_irq_params->irq_src = int_params.irq_source;
2608 
2609 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2610 				dm_crtc_high_irq, c_irq_params);
2611 	}
2612 
2613 	/* Use VUPDATE interrupt */
2614 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2615 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2616 		if (r) {
2617 			DRM_ERROR("Failed to add vupdate irq id!\n");
2618 			return r;
2619 		}
2620 
2621 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2622 		int_params.irq_source =
2623 			dc_interrupt_to_irq_source(dc, i, 0);
2624 
2625 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2626 
2627 		c_irq_params->adev = adev;
2628 		c_irq_params->irq_src = int_params.irq_source;
2629 
2630 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2631 				dm_vupdate_high_irq, c_irq_params);
2632 	}
2633 
2634 	/* Use GRPH_PFLIP interrupt */
2635 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2636 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2637 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2638 		if (r) {
2639 			DRM_ERROR("Failed to add page flip irq id!\n");
2640 			return r;
2641 		}
2642 
2643 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2644 		int_params.irq_source =
2645 			dc_interrupt_to_irq_source(dc, i, 0);
2646 
2647 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2648 
2649 		c_irq_params->adev = adev;
2650 		c_irq_params->irq_src = int_params.irq_source;
2651 
2652 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2653 				dm_pflip_high_irq, c_irq_params);
2654 
2655 	}
2656 
2657 	/* HPD */
2658 	r = amdgpu_irq_add_id(adev, client_id,
2659 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2660 	if (r) {
2661 		DRM_ERROR("Failed to add hpd irq id!\n");
2662 		return r;
2663 	}
2664 
2665 	register_hpd_handlers(adev);
2666 
2667 	return 0;
2668 }
2669 
2670 #if defined(CONFIG_DRM_AMD_DC_DCN)
2671 /* Register IRQ sources and initialize IRQ callbacks */
2672 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2673 {
2674 	struct dc *dc = adev->dm.dc;
2675 	struct common_irq_params *c_irq_params;
2676 	struct dc_interrupt_params int_params = {0};
2677 	int r;
2678 	int i;
2679 
2680 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2681 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2682 
2683 	/*
2684 	 * Actions of amdgpu_irq_add_id():
2685 	 * 1. Register a set() function with base driver.
2686 	 *    Base driver will call set() function to enable/disable an
2687 	 *    interrupt in DC hardware.
2688 	 * 2. Register amdgpu_dm_irq_handler().
2689 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2690 	 *    coming from DC hardware.
2691 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2692 	 *    for acknowledging and handling.
2693 	 */
2694 
2695 	/* Use VSTARTUP interrupt */
2696 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2697 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2698 			i++) {
2699 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2700 
2701 		if (r) {
2702 			DRM_ERROR("Failed to add crtc irq id!\n");
2703 			return r;
2704 		}
2705 
2706 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 		int_params.irq_source =
2708 			dc_interrupt_to_irq_source(dc, i, 0);
2709 
2710 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2711 
2712 		c_irq_params->adev = adev;
2713 		c_irq_params->irq_src = int_params.irq_source;
2714 
2715 		amdgpu_dm_irq_register_interrupt(
2716 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2717 	}
2718 
2719 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2720 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2721 	 * to trigger at end of each vblank, regardless of state of the lock,
2722 	 * matching DCE behaviour.
2723 	 */
2724 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2725 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2726 	     i++) {
2727 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2728 
2729 		if (r) {
2730 			DRM_ERROR("Failed to add vupdate irq id!\n");
2731 			return r;
2732 		}
2733 
2734 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2735 		int_params.irq_source =
2736 			dc_interrupt_to_irq_source(dc, i, 0);
2737 
2738 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2739 
2740 		c_irq_params->adev = adev;
2741 		c_irq_params->irq_src = int_params.irq_source;
2742 
2743 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2744 				dm_vupdate_high_irq, c_irq_params);
2745 	}
2746 
2747 	/* Use GRPH_PFLIP interrupt */
2748 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2749 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2750 			i++) {
2751 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2752 		if (r) {
2753 			DRM_ERROR("Failed to add page flip irq id!\n");
2754 			return r;
2755 		}
2756 
2757 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2758 		int_params.irq_source =
2759 			dc_interrupt_to_irq_source(dc, i, 0);
2760 
2761 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2762 
2763 		c_irq_params->adev = adev;
2764 		c_irq_params->irq_src = int_params.irq_source;
2765 
2766 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2767 				dm_pflip_high_irq, c_irq_params);
2768 
2769 	}
2770 
2771 	/* HPD */
2772 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2773 			&adev->hpd_irq);
2774 	if (r) {
2775 		DRM_ERROR("Failed to add hpd irq id!\n");
2776 		return r;
2777 	}
2778 
2779 	register_hpd_handlers(adev);
2780 
2781 	return 0;
2782 }
2783 #endif
2784 
2785 /*
2786  * Acquires the lock for the atomic state object and returns
2787  * the new atomic state.
2788  *
2789  * This should only be called during atomic check.
2790  */
2791 static int dm_atomic_get_state(struct drm_atomic_state *state,
2792 			       struct dm_atomic_state **dm_state)
2793 {
2794 	struct drm_device *dev = state->dev;
2795 	struct amdgpu_device *adev = dev->dev_private;
2796 	struct amdgpu_display_manager *dm = &adev->dm;
2797 	struct drm_private_state *priv_state;
2798 
2799 	if (*dm_state)
2800 		return 0;
2801 
2802 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2803 	if (IS_ERR(priv_state))
2804 		return PTR_ERR(priv_state);
2805 
2806 	*dm_state = to_dm_atomic_state(priv_state);
2807 
2808 	return 0;
2809 }
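/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *
 * On success, dm_state->context can be inspected or modified; the private
 * object lock stays held for the remainder of the atomic check.
 */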
2810 
2811 static struct dm_atomic_state *
2812 dm_atomic_get_new_state(struct drm_atomic_state *state)
2813 {
2814 	struct drm_device *dev = state->dev;
2815 	struct amdgpu_device *adev = dev->dev_private;
2816 	struct amdgpu_display_manager *dm = &adev->dm;
2817 	struct drm_private_obj *obj;
2818 	struct drm_private_state *new_obj_state;
2819 	int i;
2820 
2821 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2822 		if (obj->funcs == dm->atomic_obj.funcs)
2823 			return to_dm_atomic_state(new_obj_state);
2824 	}
2825 
2826 	return NULL;
2827 }
2828 
2829 static struct dm_atomic_state *
2830 dm_atomic_get_old_state(struct drm_atomic_state *state)
2831 {
2832 	struct drm_device *dev = state->dev;
2833 	struct amdgpu_device *adev = dev->dev_private;
2834 	struct amdgpu_display_manager *dm = &adev->dm;
2835 	struct drm_private_obj *obj;
2836 	struct drm_private_state *old_obj_state;
2837 	int i;
2838 
2839 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2840 		if (obj->funcs == dm->atomic_obj.funcs)
2841 			return to_dm_atomic_state(old_obj_state);
2842 	}
2843 
2844 	return NULL;
2845 }
2846 
2847 static struct drm_private_state *
2848 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2849 {
2850 	struct dm_atomic_state *old_state, *new_state;
2851 
2852 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2853 	if (!new_state)
2854 		return NULL;
2855 
2856 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2857 
2858 	old_state = to_dm_atomic_state(obj->state);
2859 
2860 	if (old_state && old_state->context)
2861 		new_state->context = dc_copy_state(old_state->context);
2862 
2863 	if (!new_state->context) {
2864 		kfree(new_state);
2865 		return NULL;
2866 	}
2867 
2868 	return &new_state->base;
2869 }
2870 
2871 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2872 				    struct drm_private_state *state)
2873 {
2874 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2875 
2876 	if (dm_state && dm_state->context)
2877 		dc_release_state(dm_state->context);
2878 
2879 	kfree(dm_state);
2880 }
2881 
2882 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2883 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2884 	.atomic_destroy_state = dm_atomic_destroy_state,
2885 };
2886 
2887 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2888 {
2889 	struct dm_atomic_state *state;
2890 	int r;
2891 
2892 	adev->mode_info.mode_config_initialized = true;
2893 
2894 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2895 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2896 
2897 	adev->ddev->mode_config.max_width = 16384;
2898 	adev->ddev->mode_config.max_height = 16384;
2899 
2900 	adev->ddev->mode_config.preferred_depth = 24;
2901 	adev->ddev->mode_config.prefer_shadow = 1;
2902 	/* indicates support for immediate flip */
2903 	adev->ddev->mode_config.async_page_flip = true;
2904 
2905 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2906 
2907 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2908 	if (!state)
2909 		return -ENOMEM;
2910 
2911 	state->context = dc_create_state(adev->dm.dc);
2912 	if (!state->context) {
2913 		kfree(state);
2914 		return -ENOMEM;
2915 	}
2916 
2917 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2918 
2919 	drm_atomic_private_obj_init(adev->ddev,
2920 				    &adev->dm.atomic_obj,
2921 				    &state->base,
2922 				    &dm_atomic_state_funcs);
2923 
2924 	r = amdgpu_display_modeset_create_props(adev);
2925 	if (r)
2926 		return r;
2927 
2928 	r = amdgpu_dm_audio_init(adev);
2929 	if (r)
2930 		return r;
2931 
2932 	return 0;
2933 }
2934 
2935 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2936 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2937 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2938 
2939 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2940 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2941 
2942 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2943 {
2944 #if defined(CONFIG_ACPI)
2945 	struct amdgpu_dm_backlight_caps caps;
2946 
2947 	if (dm->backlight_caps.caps_valid)
2948 		return;
2949 
2950 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2951 	if (caps.caps_valid) {
2952 		dm->backlight_caps.caps_valid = true;
2953 		if (caps.aux_support)
2954 			return;
2955 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2956 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2957 	} else {
2958 		dm->backlight_caps.min_input_signal =
2959 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2960 		dm->backlight_caps.max_input_signal =
2961 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2962 	}
2963 #else
2964 	if (dm->backlight_caps.aux_support)
2965 		return;
2966 
2967 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2968 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2969 #endif
2970 }
2971 
2972 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2973 {
2974 	bool rc;
2975 
2976 	if (!link)
2977 		return 1;
2978 
2979 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2980 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2981 
2982 	return rc ? 0 : 1;
2983 }
2984 
2985 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2986 			      const uint32_t user_brightness)
2987 {
2988 	u32 min, max, conversion_pace;
2989 	u32 brightness = user_brightness;
2990 
2991 	if (!caps)
2992 		goto out;
2993 
2994 	if (!caps->aux_support) {
2995 		max = caps->max_input_signal;
2996 		min = caps->min_input_signal;
2997 		/*
2998 		 * The brightness input is in the range 0-255
2999 		 * It needs to be rescaled to be between the
3000 		 * requested min and max input signal
3001 		 * It also needs to be scaled up by 0x101 to
3002 		 * match the DC interface which has a range of
3003 		 * 0 to 0xffff
3004 		 */
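		/*
		 * Worked example with illustrative values: for min = 12,
		 * max = 255 and user_brightness = 128, the computation below
		 * yields 128 * 0x101 * 243 / 255 + 12 * 0x101 = 34431,
		 * roughly the midpoint of the 0..0xffff DC range once the
		 * minimum offset is accounted for.
		 */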
3005 		conversion_pace = 0x101;
3006 		brightness =
3007 			user_brightness
3008 			* conversion_pace
3009 			* (max - min)
3010 			/ AMDGPU_MAX_BL_LEVEL
3011 			+ min * conversion_pace;
3012 	} else {
3013 		/* TODO
3014 		 * We are doing a linear interpolation here, which is OK but
3015 		 * does not provide the optimal result. We probably want
3016 		 * something close to the Perceptual Quantizer (PQ) curve.
3017 		 */
3018 		max = caps->aux_max_input_signal;
3019 		min = caps->aux_min_input_signal;
3020 
3021 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
3022 			       + user_brightness * max;
3023 		// Multiply the value by 1000 since we use millinits
3024 		brightness *= 1000;
3025 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
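		/*
		 * Worked example with illustrative values: for
		 * aux_min_input_signal = 50, aux_max_input_signal = 228 and
		 * user_brightness = 128, this yields
		 * ((255 - 128) * 50 + 128 * 228) * 1000 / 255 = 139349
		 * millinits, i.e. roughly 139 nits.
		 */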
3026 	}
3027 
3028 out:
3029 	return brightness;
3030 }
3031 
3032 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3033 {
3034 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3035 	struct amdgpu_dm_backlight_caps caps;
3036 	struct dc_link *link = NULL;
3037 	u32 brightness;
3038 	bool rc;
3039 
3040 	amdgpu_dm_update_backlight_caps(dm);
3041 	caps = dm->backlight_caps;
3042 
3043 	link = (struct dc_link *)dm->backlight_link;
3044 
3045 	brightness = convert_brightness(&caps, bd->props.brightness);
3046 	// Change brightness based on AUX property
3047 	if (caps.aux_support)
3048 		return set_backlight_via_aux(link, brightness);
3049 
3050 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3051 
3052 	return rc ? 0 : 1;
3053 }
3054 
3055 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3056 {
3057 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3058 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3059 
3060 	if (ret == DC_ERROR_UNEXPECTED)
3061 		return bd->props.brightness;
3062 	return ret;
3063 }
3064 
3065 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3066 	.options = BL_CORE_SUSPENDRESUME,
3067 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3068 	.update_status	= amdgpu_dm_backlight_update_status,
3069 };
3070 
3071 static void
3072 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3073 {
3074 	char bl_name[16];
3075 	struct backlight_properties props = { 0 };
3076 
3077 	amdgpu_dm_update_backlight_caps(dm);
3078 
3079 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3080 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3081 	props.type = BACKLIGHT_RAW;
3082 
3083 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3084 			dm->adev->ddev->primary->index);
3085 
3086 	dm->backlight_dev = backlight_device_register(bl_name,
3087 			dm->adev->ddev->dev,
3088 			dm,
3089 			&amdgpu_dm_backlight_ops,
3090 			&props);
3091 
3092 	if (IS_ERR(dm->backlight_dev))
3093 		DRM_ERROR("DM: Backlight registration failed!\n");
3094 	else
3095 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3096 }
3097 
3098 #endif
3099 
3100 static int initialize_plane(struct amdgpu_display_manager *dm,
3101 			    struct amdgpu_mode_info *mode_info, int plane_id,
3102 			    enum drm_plane_type plane_type,
3103 			    const struct dc_plane_cap *plane_cap)
3104 {
3105 	struct drm_plane *plane;
3106 	unsigned long possible_crtcs;
3107 	int ret = 0;
3108 
3109 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3110 	if (!plane) {
3111 		DRM_ERROR("KMS: Failed to allocate plane\n");
3112 		return -ENOMEM;
3113 	}
3114 	plane->type = plane_type;
3115 
3116 	/*
3117 	 * HACK: IGT tests expect that the primary plane for a CRTC
3118 	 * can only have one possible CRTC. Only expose support for
3119 	 * any CRTC if they're not going to be used as a primary plane
3120 	 * for a CRTC - like overlay or underlay planes.
3121 	 */
3122 	possible_crtcs = 1 << plane_id;
3123 	if (plane_id >= dm->dc->caps.max_streams)
3124 		possible_crtcs = 0xff;
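	/*
	 * For example, primary plane 0 gets possible_crtcs = 0x1 (CRTC 0
	 * only), while an overlay plane (plane_id >= max_streams) gets 0xff
	 * and may be placed on any CRTC.
	 */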
3125 
3126 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3127 
3128 	if (ret) {
3129 		DRM_ERROR("KMS: Failed to initialize plane\n");
3130 		kfree(plane);
3131 		return ret;
3132 	}
3133 
3134 	if (mode_info)
3135 		mode_info->planes[plane_id] = plane;
3136 
3137 	return ret;
3138 }
3139 
3140 
3141 static void register_backlight_device(struct amdgpu_display_manager *dm,
3142 				      struct dc_link *link)
3143 {
3144 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3145 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3146 
3147 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3148 	    link->type != dc_connection_none) {
3149 		/*
3150 		 * Even if registration fails, we should continue with
3151 		 * DM initialization, because missing backlight control
3152 		 * is better than a black screen.
3153 		 */
3154 		amdgpu_dm_register_backlight_device(dm);
3155 
3156 		if (dm->backlight_dev)
3157 			dm->backlight_link = link;
3158 	}
3159 #endif
3160 }
3161 
3162 
3163 /*
3164  * In this architecture, the association
3165  * connector -> encoder -> crtc
3166  * is not really required. The crtc and connector will hold the
3167  * display_index as an abstraction to use with the DAL component.
3168  *
3169  * Returns 0 on success
3170  */
3171 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3172 {
3173 	struct amdgpu_display_manager *dm = &adev->dm;
3174 	int32_t i;
3175 	struct amdgpu_dm_connector *aconnector = NULL;
3176 	struct amdgpu_encoder *aencoder = NULL;
3177 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3178 	uint32_t link_cnt;
3179 	int32_t primary_planes;
3180 	enum dc_connection_type new_connection_type = dc_connection_none;
3181 	const struct dc_plane_cap *plane;
3182 
3183 	link_cnt = dm->dc->caps.max_links;
3184 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3185 		DRM_ERROR("DM: Failed to initialize mode config\n");
3186 		return -EINVAL;
3187 	}
3188 
3189 	/* There is one primary plane per CRTC */
3190 	primary_planes = dm->dc->caps.max_streams;
3191 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3192 
3193 	/*
3194 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3195 	 * Order is reversed to match iteration order in atomic check.
3196 	 */
3197 	for (i = (primary_planes - 1); i >= 0; i--) {
3198 		plane = &dm->dc->caps.planes[i];
3199 
3200 		if (initialize_plane(dm, mode_info, i,
3201 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3202 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3203 			goto fail;
3204 		}
3205 	}
3206 
3207 	/*
3208 	 * Initialize overlay planes, index starting after primary planes.
3209 	 * These planes have a higher DRM index than the primary planes since
3210 	 * they should be considered as having a higher z-order.
3211 	 * Order is reversed to match iteration order in atomic check.
3212 	 *
3213 	 * Only support DCN for now, and only expose one so we don't encourage
3214 	 * userspace to use up all the pipes.
3215 	 */
3216 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3217 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3218 
3219 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3220 			continue;
3221 
3222 		if (!plane->blends_with_above || !plane->blends_with_below)
3223 			continue;
3224 
3225 		if (!plane->pixel_format_support.argb8888)
3226 			continue;
3227 
3228 		if (initialize_plane(dm, NULL, primary_planes + i,
3229 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3230 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3231 			goto fail;
3232 		}
3233 
3234 		/* Only create one overlay plane. */
3235 		break;
3236 	}
3237 
3238 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3239 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3240 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3241 			goto fail;
3242 		}
3243 
3244 	dm->display_indexes_num = dm->dc->caps.max_streams;
3245 
3246 	/* loops over all connectors on the board */
3247 	for (i = 0; i < link_cnt; i++) {
3248 		struct dc_link *link = NULL;
3249 
3250 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3251 			DRM_ERROR(
3252 				"KMS: Cannot support more than %d display indexes\n",
3253 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3254 			continue;
3255 		}
3256 
3257 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3258 		if (!aconnector)
3259 			goto fail;
3260 
3261 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3262 		if (!aencoder)
3263 			goto fail;
3264 
3265 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3266 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3267 			goto fail;
3268 		}
3269 
3270 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3271 			DRM_ERROR("KMS: Failed to initialize connector\n");
3272 			goto fail;
3273 		}
3274 
3275 		link = dc_get_link_at_index(dm->dc, i);
3276 
3277 		if (!dc_link_detect_sink(link, &new_connection_type))
3278 			DRM_ERROR("KMS: Failed to detect connector\n");
3279 
3280 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3281 			emulated_link_detect(link);
3282 			amdgpu_dm_update_connector_after_detect(aconnector);
3283 
3284 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3285 			amdgpu_dm_update_connector_after_detect(aconnector);
3286 			register_backlight_device(dm, link);
3287 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3288 				amdgpu_dm_set_psr_caps(link);
3289 		}
3290 
3291 
3292 	}
3293 
3294 	/* Software is initialized. Now we can register interrupt handlers. */
3295 	switch (adev->asic_type) {
3296 #if defined(CONFIG_DRM_AMD_DC_SI)
3297 	case CHIP_TAHITI:
3298 	case CHIP_PITCAIRN:
3299 	case CHIP_VERDE:
3300 	case CHIP_OLAND:
3301 		if (dce60_register_irq_handlers(dm->adev)) {
3302 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3303 			goto fail;
3304 		}
3305 		break;
3306 #endif
3307 	case CHIP_BONAIRE:
3308 	case CHIP_HAWAII:
3309 	case CHIP_KAVERI:
3310 	case CHIP_KABINI:
3311 	case CHIP_MULLINS:
3312 	case CHIP_TONGA:
3313 	case CHIP_FIJI:
3314 	case CHIP_CARRIZO:
3315 	case CHIP_STONEY:
3316 	case CHIP_POLARIS11:
3317 	case CHIP_POLARIS10:
3318 	case CHIP_POLARIS12:
3319 	case CHIP_VEGAM:
3320 	case CHIP_VEGA10:
3321 	case CHIP_VEGA12:
3322 	case CHIP_VEGA20:
3323 		if (dce110_register_irq_handlers(dm->adev)) {
3324 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3325 			goto fail;
3326 		}
3327 		break;
3328 #if defined(CONFIG_DRM_AMD_DC_DCN)
3329 	case CHIP_RAVEN:
3330 	case CHIP_NAVI12:
3331 	case CHIP_NAVI10:
3332 	case CHIP_NAVI14:
3333 	case CHIP_RENOIR:
3334 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3335 	case CHIP_SIENNA_CICHLID:
3336 	case CHIP_NAVY_FLOUNDER:
3337 #endif
3338 		if (dcn10_register_irq_handlers(dm->adev)) {
3339 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3340 			goto fail;
3341 		}
3342 		break;
3343 #endif
3344 	default:
3345 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3346 		goto fail;
3347 	}
3348 
3349 	/* No userspace support. */
3350 	dm->dc->debug.disable_tri_buf = true;
3351 
3352 	return 0;
3353 fail:
3354 	kfree(aencoder);
3355 	kfree(aconnector);
3356 
3357 	return -EINVAL;
3358 }
3359 
3360 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3361 {
3362 	drm_mode_config_cleanup(dm->ddev);
3363 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3364 	return;
3365 }
3366 
3367 /******************************************************************************
3368  * amdgpu_display_funcs functions
3369  *****************************************************************************/
3370 
3371 /*
3372  * dm_bandwidth_update - program display watermarks
3373  *
3374  * @adev: amdgpu_device pointer
3375  *
3376  * Calculate and program the display watermarks and line buffer allocation.
3377  */
3378 static void dm_bandwidth_update(struct amdgpu_device *adev)
3379 {
3380 	/* TODO: implement later */
3381 }
3382 
3383 static const struct amdgpu_display_funcs dm_display_funcs = {
3384 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3385 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3386 	.backlight_set_level = NULL, /* never called for DC */
3387 	.backlight_get_level = NULL, /* never called for DC */
3388 	.hpd_sense = NULL, /* called unconditionally */
3389 	.hpd_set_polarity = NULL, /* called unconditionally */
3390 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3391 	.page_flip_get_scanoutpos =
3392 		dm_crtc_get_scanoutpos, /* called unconditionally */
3393 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3394 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3395 };
3396 
3397 #if defined(CONFIG_DEBUG_KERNEL_DC)
3398 
3399 static ssize_t s3_debug_store(struct device *device,
3400 			      struct device_attribute *attr,
3401 			      const char *buf,
3402 			      size_t count)
3403 {
3404 	int ret;
3405 	int s3_state;
3406 	struct drm_device *drm_dev = dev_get_drvdata(device);
3407 	struct amdgpu_device *adev = drm_dev->dev_private;
3408 
3409 	ret = kstrtoint(buf, 0, &s3_state);
3410 
3411 	if (ret == 0) {
3412 		if (s3_state) {
3413 			dm_resume(adev);
3414 			drm_kms_helper_hotplug_event(adev->ddev);
3415 		} else
3416 			dm_suspend(adev);
3417 	}
3418 
3419 	return ret == 0 ? count : 0;
3420 }
3421 
3422 DEVICE_ATTR_WO(s3_debug);
3423 
3424 #endif
3425 
3426 static int dm_early_init(void *handle)
3427 {
3428 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3429 
3430 	switch (adev->asic_type) {
3431 #if defined(CONFIG_DRM_AMD_DC_SI)
3432 	case CHIP_TAHITI:
3433 	case CHIP_PITCAIRN:
3434 	case CHIP_VERDE:
3435 		adev->mode_info.num_crtc = 6;
3436 		adev->mode_info.num_hpd = 6;
3437 		adev->mode_info.num_dig = 6;
3438 		break;
3439 	case CHIP_OLAND:
3440 		adev->mode_info.num_crtc = 2;
3441 		adev->mode_info.num_hpd = 2;
3442 		adev->mode_info.num_dig = 2;
3443 		break;
3444 #endif
3445 	case CHIP_BONAIRE:
3446 	case CHIP_HAWAII:
3447 		adev->mode_info.num_crtc = 6;
3448 		adev->mode_info.num_hpd = 6;
3449 		adev->mode_info.num_dig = 6;
3450 		break;
3451 	case CHIP_KAVERI:
3452 		adev->mode_info.num_crtc = 4;
3453 		adev->mode_info.num_hpd = 6;
3454 		adev->mode_info.num_dig = 7;
3455 		break;
3456 	case CHIP_KABINI:
3457 	case CHIP_MULLINS:
3458 		adev->mode_info.num_crtc = 2;
3459 		adev->mode_info.num_hpd = 6;
3460 		adev->mode_info.num_dig = 6;
3461 		break;
3462 	case CHIP_FIJI:
3463 	case CHIP_TONGA:
3464 		adev->mode_info.num_crtc = 6;
3465 		adev->mode_info.num_hpd = 6;
3466 		adev->mode_info.num_dig = 7;
3467 		break;
3468 	case CHIP_CARRIZO:
3469 		adev->mode_info.num_crtc = 3;
3470 		adev->mode_info.num_hpd = 6;
3471 		adev->mode_info.num_dig = 9;
3472 		break;
3473 	case CHIP_STONEY:
3474 		adev->mode_info.num_crtc = 2;
3475 		adev->mode_info.num_hpd = 6;
3476 		adev->mode_info.num_dig = 9;
3477 		break;
3478 	case CHIP_POLARIS11:
3479 	case CHIP_POLARIS12:
3480 		adev->mode_info.num_crtc = 5;
3481 		adev->mode_info.num_hpd = 5;
3482 		adev->mode_info.num_dig = 5;
3483 		break;
3484 	case CHIP_POLARIS10:
3485 	case CHIP_VEGAM:
3486 		adev->mode_info.num_crtc = 6;
3487 		adev->mode_info.num_hpd = 6;
3488 		adev->mode_info.num_dig = 6;
3489 		break;
3490 	case CHIP_VEGA10:
3491 	case CHIP_VEGA12:
3492 	case CHIP_VEGA20:
3493 		adev->mode_info.num_crtc = 6;
3494 		adev->mode_info.num_hpd = 6;
3495 		adev->mode_info.num_dig = 6;
3496 		break;
3497 #if defined(CONFIG_DRM_AMD_DC_DCN)
3498 	case CHIP_RAVEN:
3499 		adev->mode_info.num_crtc = 4;
3500 		adev->mode_info.num_hpd = 4;
3501 		adev->mode_info.num_dig = 4;
3502 		break;
3503 #endif
3504 	case CHIP_NAVI10:
3505 	case CHIP_NAVI12:
3506 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3507 	case CHIP_SIENNA_CICHLID:
3508 	case CHIP_NAVY_FLOUNDER:
3509 #endif
3510 		adev->mode_info.num_crtc = 6;
3511 		adev->mode_info.num_hpd = 6;
3512 		adev->mode_info.num_dig = 6;
3513 		break;
3514 	case CHIP_NAVI14:
3515 		adev->mode_info.num_crtc = 5;
3516 		adev->mode_info.num_hpd = 5;
3517 		adev->mode_info.num_dig = 5;
3518 		break;
3519 	case CHIP_RENOIR:
3520 		adev->mode_info.num_crtc = 4;
3521 		adev->mode_info.num_hpd = 4;
3522 		adev->mode_info.num_dig = 4;
3523 		break;
3524 	default:
3525 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3526 		return -EINVAL;
3527 	}
3528 
3529 	amdgpu_dm_set_irq_funcs(adev);
3530 
3531 	if (adev->mode_info.funcs == NULL)
3532 		adev->mode_info.funcs = &dm_display_funcs;
3533 
3534 	/*
3535 	 * Note: Do NOT change adev->audio_endpt_rreg and
3536 	 * adev->audio_endpt_wreg because they are initialised in
3537 	 * amdgpu_device_init()
3538 	 */
3539 #if defined(CONFIG_DEBUG_KERNEL_DC)
3540 	device_create_file(
3541 		adev->ddev->dev,
3542 		&dev_attr_s3_debug);
3543 #endif
3544 
3545 	return 0;
3546 }
3547 
3548 static bool modeset_required(struct drm_crtc_state *crtc_state,
3549 			     struct dc_stream_state *new_stream,
3550 			     struct dc_stream_state *old_stream)
3551 {
3552 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3553 }
3554 
3555 static bool modereset_required(struct drm_crtc_state *crtc_state)
3556 {
3557 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3558 }
3559 
3560 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3561 {
3562 	drm_encoder_cleanup(encoder);
3563 	kfree(encoder);
3564 }
3565 
3566 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3567 	.destroy = amdgpu_dm_encoder_destroy,
3568 };
3569 
3570 
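/*
 * Translate a DRM plane state (16.16 fixed-point source coordinates)
 * into DC's scaling info. Zero-sized rectangles and scaling ratios
 * outside the supported range are rejected with -EINVAL.
 */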
3571 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3572 				struct dc_scaling_info *scaling_info)
3573 {
3574 	int scale_w, scale_h;
3575 
3576 	memset(scaling_info, 0, sizeof(*scaling_info));
3577 
3578 	/* Source is in 16.16 fixed point; ignore the fractional part for now. */
3579 	scaling_info->src_rect.x = state->src_x >> 16;
3580 	scaling_info->src_rect.y = state->src_y >> 16;
3581 
3582 	scaling_info->src_rect.width = state->src_w >> 16;
3583 	if (scaling_info->src_rect.width == 0)
3584 		return -EINVAL;
3585 
3586 	scaling_info->src_rect.height = state->src_h >> 16;
3587 	if (scaling_info->src_rect.height == 0)
3588 		return -EINVAL;
3589 
3590 	scaling_info->dst_rect.x = state->crtc_x;
3591 	scaling_info->dst_rect.y = state->crtc_y;
3592 
3593 	if (state->crtc_w == 0)
3594 		return -EINVAL;
3595 
3596 	scaling_info->dst_rect.width = state->crtc_w;
3597 
3598 	if (state->crtc_h == 0)
3599 		return -EINVAL;
3600 
3601 	scaling_info->dst_rect.height = state->crtc_h;
3602 
3603 	/* DRM doesn't specify clipping on destination output. */
3604 	scaling_info->clip_rect = scaling_info->dst_rect;
3605 
3606 	/* TODO: Validate scaling per-format with DC plane caps */
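	/* Ratios below are in 1/1000ths: 250 = 0.25x, 16000 = 16x. */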
3607 	scale_w = scaling_info->dst_rect.width * 1000 /
3608 		  scaling_info->src_rect.width;
3609 
3610 	if (scale_w < 250 || scale_w > 16000)
3611 		return -EINVAL;
3612 
3613 	scale_h = scaling_info->dst_rect.height * 1000 /
3614 		  scaling_info->src_rect.height;
3615 
3616 	if (scale_h < 250 || scale_h > 16000)
3617 		return -EINVAL;
3618 
3619 	/*
3620 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3621 	 * assume reasonable defaults based on the format.
3622 	 */
3623 
3624 	return 0;
3625 }
3626 
3627 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3628 		       uint64_t *tiling_flags, bool *tmz_surface)
3629 {
3630 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3631 	int r = amdgpu_bo_reserve(rbo, false);
3632 
3633 	if (unlikely(r)) {
3634 		/* Don't show error message when returning -ERESTARTSYS */
3635 		if (r != -ERESTARTSYS)
3636 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3637 		return r;
3638 	}
3639 
3640 	if (tiling_flags)
3641 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3642 
3643 	if (tmz_surface)
3644 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3645 
3646 	amdgpu_bo_unreserve(rbo);
3647 
3648 	return r;
3649 }
3650 
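/*
 * DCC metadata lives DCC_OFFSET_256B * 256 bytes past the surface base
 * address; a zero offset means DCC is disabled for this buffer.
 */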
3651 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3652 {
3653 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3654 
3655 	return offset ? (address + offset * 256) : 0;
3656 }
3657 
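/*
 * Validate and fill the DCC (Delta Color Compression) parameters.
 * Returning 0 with dcc->enable still 0 simply leaves DCC off (forced
 * off, no metadata offset, or a video format); -EINVAL means the DCC
 * layout in the tiling flags is incompatible with the DC caps.
 */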
3658 static int
3659 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3660 			  const struct amdgpu_framebuffer *afb,
3661 			  const enum surface_pixel_format format,
3662 			  const enum dc_rotation_angle rotation,
3663 			  const struct plane_size *plane_size,
3664 			  const union dc_tiling_info *tiling_info,
3665 			  const uint64_t info,
3666 			  struct dc_plane_dcc_param *dcc,
3667 			  struct dc_plane_address *address,
3668 			  bool force_disable_dcc)
3669 {
3670 	struct dc *dc = adev->dm.dc;
3671 	struct dc_dcc_surface_param input;
3672 	struct dc_surface_dcc_cap output;
3673 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3674 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3675 	uint64_t dcc_address;
3676 
3677 	memset(&input, 0, sizeof(input));
3678 	memset(&output, 0, sizeof(output));
3679 
3680 	if (force_disable_dcc)
3681 		return 0;
3682 
3683 	if (!offset)
3684 		return 0;
3685 
3686 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3687 		return 0;
3688 
3689 	if (!dc->cap_funcs.get_dcc_compression_cap)
3690 		return -EINVAL;
3691 
3692 	input.format = format;
3693 	input.surface_size.width = plane_size->surface_size.width;
3694 	input.surface_size.height = plane_size->surface_size.height;
3695 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3696 
3697 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3698 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3699 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3700 		input.scan = SCAN_DIRECTION_VERTICAL;
3701 
3702 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3703 		return -EINVAL;
3704 
3705 	if (!output.capable)
3706 		return -EINVAL;
3707 
3708 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3709 		return -EINVAL;
3710 
3711 	dcc->enable = 1;
3712 	dcc->meta_pitch =
3713 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3714 	dcc->independent_64b_blks = i64b;
3715 
3716 	dcc_address = get_dcc_address(afb->address, info);
3717 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3718 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3719 
3720 	return 0;
3721 }
3722 
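/*
 * Derive the tiling description (GFX8 macro-tile parameters or GFX9+
 * swizzle mode), plane dimensions, DCC state and physical addresses
 * that DC needs from the amdgpu framebuffer and its tiling flags.
 */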
3723 static int
3724 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3725 			     const struct amdgpu_framebuffer *afb,
3726 			     const enum surface_pixel_format format,
3727 			     const enum dc_rotation_angle rotation,
3728 			     const uint64_t tiling_flags,
3729 			     union dc_tiling_info *tiling_info,
3730 			     struct plane_size *plane_size,
3731 			     struct dc_plane_dcc_param *dcc,
3732 			     struct dc_plane_address *address,
3733 			     bool tmz_surface,
3734 			     bool force_disable_dcc)
3735 {
3736 	const struct drm_framebuffer *fb = &afb->base;
3737 	int ret;
3738 
3739 	memset(tiling_info, 0, sizeof(*tiling_info));
3740 	memset(plane_size, 0, sizeof(*plane_size));
3741 	memset(dcc, 0, sizeof(*dcc));
3742 	memset(address, 0, sizeof(*address));
3743 
3744 	address->tmz_surface = tmz_surface;
3745 
3746 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3747 		plane_size->surface_size.x = 0;
3748 		plane_size->surface_size.y = 0;
3749 		plane_size->surface_size.width = fb->width;
3750 		plane_size->surface_size.height = fb->height;
3751 		plane_size->surface_pitch =
3752 			fb->pitches[0] / fb->format->cpp[0];
3753 
3754 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3755 		address->grph.addr.low_part = lower_32_bits(afb->address);
3756 		address->grph.addr.high_part = upper_32_bits(afb->address);
3757 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3758 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3759 
3760 		plane_size->surface_size.x = 0;
3761 		plane_size->surface_size.y = 0;
3762 		plane_size->surface_size.width = fb->width;
3763 		plane_size->surface_size.height = fb->height;
3764 		plane_size->surface_pitch =
3765 			fb->pitches[0] / fb->format->cpp[0];
3766 
3767 		plane_size->chroma_size.x = 0;
3768 		plane_size->chroma_size.y = 0;
3769 		/* TODO: set these based on surface format */
3770 		plane_size->chroma_size.width = fb->width / 2;
3771 		plane_size->chroma_size.height = fb->height / 2;
3772 
3773 		plane_size->chroma_pitch =
3774 			fb->pitches[1] / fb->format->cpp[1];
3775 
3776 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3777 		address->video_progressive.luma_addr.low_part =
3778 			lower_32_bits(afb->address);
3779 		address->video_progressive.luma_addr.high_part =
3780 			upper_32_bits(afb->address);
3781 		address->video_progressive.chroma_addr.low_part =
3782 			lower_32_bits(chroma_addr);
3783 		address->video_progressive.chroma_addr.high_part =
3784 			upper_32_bits(chroma_addr);
3785 	}
3786 
3787 	/* Fill GFX8 params */
3788 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3789 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3790 
3791 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3792 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3793 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3794 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3795 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3796 
3797 		/* XXX fix me for VI */
3798 		tiling_info->gfx8.num_banks = num_banks;
3799 		tiling_info->gfx8.array_mode =
3800 				DC_ARRAY_2D_TILED_THIN1;
3801 		tiling_info->gfx8.tile_split = tile_split;
3802 		tiling_info->gfx8.bank_width = bankw;
3803 		tiling_info->gfx8.bank_height = bankh;
3804 		tiling_info->gfx8.tile_aspect = mtaspect;
3805 		tiling_info->gfx8.tile_mode =
3806 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3807 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3808 			== DC_ARRAY_1D_TILED_THIN1) {
3809 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3810 	}
3811 
3812 	tiling_info->gfx8.pipe_config =
3813 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3814 
3815 	if (adev->asic_type == CHIP_VEGA10 ||
3816 	    adev->asic_type == CHIP_VEGA12 ||
3817 	    adev->asic_type == CHIP_VEGA20 ||
3818 	    adev->asic_type == CHIP_NAVI10 ||
3819 	    adev->asic_type == CHIP_NAVI14 ||
3820 	    adev->asic_type == CHIP_NAVI12 ||
3821 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3822 	    adev->asic_type == CHIP_SIENNA_CICHLID ||
3823 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3824 #endif
3825 	    adev->asic_type == CHIP_RENOIR ||
3826 	    adev->asic_type == CHIP_RAVEN) {
3827 		/* Fill GFX9 params */
3828 		tiling_info->gfx9.num_pipes =
3829 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3830 		tiling_info->gfx9.num_banks =
3831 			adev->gfx.config.gb_addr_config_fields.num_banks;
3832 		tiling_info->gfx9.pipe_interleave =
3833 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3834 		tiling_info->gfx9.num_shader_engines =
3835 			adev->gfx.config.gb_addr_config_fields.num_se;
3836 		tiling_info->gfx9.max_compressed_frags =
3837 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3838 		tiling_info->gfx9.num_rb_per_se =
3839 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3840 		tiling_info->gfx9.swizzle =
3841 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3842 		tiling_info->gfx9.shaderEnable = 1;
3843 
3844 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3845 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3846 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3847 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3848 #endif
3849 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3850 						plane_size, tiling_info,
3851 						tiling_flags, dcc, address,
3852 						force_disable_dcc);
3853 		if (ret)
3854 			return ret;
3855 	}
3856 
3857 	return 0;
3858 }
3859 
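/*
 * Blending only applies to overlay planes: per-pixel alpha requires a
 * premultiplied blend mode plus an alpha-capable format, and the 16-bit
 * DRM plane alpha is scaled down to DC's 8-bit global alpha.
 */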
3860 static void
3861 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3862 			       bool *per_pixel_alpha, bool *global_alpha,
3863 			       int *global_alpha_value)
3864 {
3865 	*per_pixel_alpha = false;
3866 	*global_alpha = false;
3867 	*global_alpha_value = 0xff;
3868 
3869 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3870 		return;
3871 
3872 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3873 		static const uint32_t alpha_formats[] = {
3874 			DRM_FORMAT_ARGB8888,
3875 			DRM_FORMAT_RGBA8888,
3876 			DRM_FORMAT_ABGR8888,
3877 		};
3878 		uint32_t format = plane_state->fb->format->format;
3879 		unsigned int i;
3880 
3881 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3882 			if (format == alpha_formats[i]) {
3883 				*per_pixel_alpha = true;
3884 				break;
3885 			}
3886 		}
3887 	}
3888 
3889 	if (plane_state->alpha < 0xffff) {
3890 		*global_alpha = true;
3891 		*global_alpha_value = plane_state->alpha >> 8;
3892 	}
3893 }
3894 
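/*
 * Map the DRM color encoding/range plane properties to a DC color
 * space. RGB formats always use sRGB; BT.2020 is only supported full
 * range here.
 */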
3895 static int
3896 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3897 			    const enum surface_pixel_format format,
3898 			    enum dc_color_space *color_space)
3899 {
3900 	bool full_range;
3901 
3902 	*color_space = COLOR_SPACE_SRGB;
3903 
3904 	/* DRM color properties only affect non-RGB formats. */
3905 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3906 		return 0;
3907 
3908 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3909 
3910 	switch (plane_state->color_encoding) {
3911 	case DRM_COLOR_YCBCR_BT601:
3912 		if (full_range)
3913 			*color_space = COLOR_SPACE_YCBCR601;
3914 		else
3915 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3916 		break;
3917 
3918 	case DRM_COLOR_YCBCR_BT709:
3919 		if (full_range)
3920 			*color_space = COLOR_SPACE_YCBCR709;
3921 		else
3922 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3923 		break;
3924 
3925 	case DRM_COLOR_YCBCR_BT2020:
3926 		if (full_range)
3927 			*color_space = COLOR_SPACE_2020_YCBCR;
3928 		else
3929 			return -EINVAL;
3930 		break;
3931 
3932 	default:
3933 		return -EINVAL;
3934 	}
3935 
3936 	return 0;
3937 }
3938 
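/*
 * Build the complete dc_plane_info (format, rotation, tiling, DCC,
 * color space, blending) plus the surface addresses for a plane state.
 */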
3939 static int
3940 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3941 			    const struct drm_plane_state *plane_state,
3942 			    const uint64_t tiling_flags,
3943 			    struct dc_plane_info *plane_info,
3944 			    struct dc_plane_address *address,
3945 			    bool tmz_surface,
3946 			    bool force_disable_dcc)
3947 {
3948 	const struct drm_framebuffer *fb = plane_state->fb;
3949 	const struct amdgpu_framebuffer *afb =
3950 		to_amdgpu_framebuffer(plane_state->fb);
3951 	struct drm_format_name_buf format_name;
3952 	int ret;
3953 
3954 	memset(plane_info, 0, sizeof(*plane_info));
3955 
3956 	switch (fb->format->format) {
3957 	case DRM_FORMAT_C8:
3958 		plane_info->format =
3959 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3960 		break;
3961 	case DRM_FORMAT_RGB565:
3962 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3963 		break;
3964 	case DRM_FORMAT_XRGB8888:
3965 	case DRM_FORMAT_ARGB8888:
3966 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3967 		break;
3968 	case DRM_FORMAT_XRGB2101010:
3969 	case DRM_FORMAT_ARGB2101010:
3970 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3971 		break;
3972 	case DRM_FORMAT_XBGR2101010:
3973 	case DRM_FORMAT_ABGR2101010:
3974 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3975 		break;
3976 	case DRM_FORMAT_XBGR8888:
3977 	case DRM_FORMAT_ABGR8888:
3978 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3979 		break;
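	/*
	 * Note: the YCbCr/YCrCb suffixes below look swapped relative to
	 * the DRM fourcc names; DC's enum naming for the chroma order
	 * differs from DRM's, so the mapping is kept as-is.
	 */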
3980 	case DRM_FORMAT_NV21:
3981 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3982 		break;
3983 	case DRM_FORMAT_NV12:
3984 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3985 		break;
3986 	case DRM_FORMAT_P010:
3987 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3988 		break;
3989 	case DRM_FORMAT_XRGB16161616F:
3990 	case DRM_FORMAT_ARGB16161616F:
3991 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3992 		break;
3993 	case DRM_FORMAT_XBGR16161616F:
3994 	case DRM_FORMAT_ABGR16161616F:
3995 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3996 		break;
3997 	default:
3998 		DRM_ERROR(
3999 			"Unsupported screen format %s\n",
4000 			drm_get_format_name(fb->format->format, &format_name));
4001 		return -EINVAL;
4002 	}
4003 
4004 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4005 	case DRM_MODE_ROTATE_0:
4006 		plane_info->rotation = ROTATION_ANGLE_0;
4007 		break;
4008 	case DRM_MODE_ROTATE_90:
4009 		plane_info->rotation = ROTATION_ANGLE_90;
4010 		break;
4011 	case DRM_MODE_ROTATE_180:
4012 		plane_info->rotation = ROTATION_ANGLE_180;
4013 		break;
4014 	case DRM_MODE_ROTATE_270:
4015 		plane_info->rotation = ROTATION_ANGLE_270;
4016 		break;
4017 	default:
4018 		plane_info->rotation = ROTATION_ANGLE_0;
4019 		break;
4020 	}
4021 
4022 	plane_info->visible = true;
4023 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4024 
4025 	plane_info->layer_index = 0;
4026 
4027 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4028 					  &plane_info->color_space);
4029 	if (ret)
4030 		return ret;
4031 
4032 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4033 					   plane_info->rotation, tiling_flags,
4034 					   &plane_info->tiling_info,
4035 					   &plane_info->plane_size,
4036 					   &plane_info->dcc, address, tmz_surface,
4037 					   force_disable_dcc);
4038 	if (ret)
4039 		return ret;
4040 
4041 	fill_blending_from_plane_state(
4042 		plane_state, &plane_info->per_pixel_alpha,
4043 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4044 
4045 	return 0;
4046 }
4047 
4048 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4049 				    struct dc_plane_state *dc_plane_state,
4050 				    struct drm_plane_state *plane_state,
4051 				    struct drm_crtc_state *crtc_state)
4052 {
4053 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4054 	const struct amdgpu_framebuffer *amdgpu_fb =
4055 		to_amdgpu_framebuffer(plane_state->fb);
4056 	struct dc_scaling_info scaling_info;
4057 	struct dc_plane_info plane_info;
4058 	uint64_t tiling_flags;
4059 	int ret;
4060 	bool tmz_surface = false;
4061 	bool force_disable_dcc = false;
4062 
4063 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4064 	if (ret)
4065 		return ret;
4066 
4067 	dc_plane_state->src_rect = scaling_info.src_rect;
4068 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4069 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4070 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4071 
4072 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
4073 	if (ret)
4074 		return ret;
4075 
4076 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4077 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
4078 					  &plane_info,
4079 					  &dc_plane_state->address,
4080 					  tmz_surface,
4081 					  force_disable_dcc);
4082 	if (ret)
4083 		return ret;
4084 
4085 	dc_plane_state->format = plane_info.format;
4086 	dc_plane_state->color_space = plane_info.color_space;
4088 	dc_plane_state->plane_size = plane_info.plane_size;
4089 	dc_plane_state->rotation = plane_info.rotation;
4090 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4091 	dc_plane_state->stereo_format = plane_info.stereo_format;
4092 	dc_plane_state->tiling_info = plane_info.tiling_info;
4093 	dc_plane_state->visible = plane_info.visible;
4094 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4095 	dc_plane_state->global_alpha = plane_info.global_alpha;
4096 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4097 	dc_plane_state->dcc = plane_info.dcc;
4098 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
4099 
4100 	/*
4101 	 * Always set input transfer function, since plane state is refreshed
4102 	 * every time.
4103 	 */
4104 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4105 	if (ret)
4106 		return ret;
4107 
4108 	return 0;
4109 }
4110 
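/*
 * Compute the stream's src/dst rectangles from the connector's RMX
 * scaling mode (off/full/aspect/center) and apply any underscan
 * borders on top.
 */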
4111 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4112 					   const struct dm_connector_state *dm_state,
4113 					   struct dc_stream_state *stream)
4114 {
4115 	enum amdgpu_rmx_type rmx_type;
4116 
4117 	struct rect src = { 0 }; /* viewport in composition space */
4118 	struct rect dst = { 0 }; /* stream addressable area */
4119 
4120 	/* no mode. nothing to be done */
4121 	if (!mode)
4122 		return;
4123 
4124 	/* Full screen scaling by default */
4125 	src.width = mode->hdisplay;
4126 	src.height = mode->vdisplay;
4127 	dst.width = stream->timing.h_addressable;
4128 	dst.height = stream->timing.v_addressable;
4129 
4130 	if (dm_state) {
4131 		rmx_type = dm_state->scaling;
4132 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4133 			if (src.width * dst.height <
4134 					src.height * dst.width) {
4135 				/* height needs less upscaling/more downscaling */
4136 				dst.width = src.width *
4137 						dst.height / src.height;
4138 			} else {
4139 				/* width needs less upscaling/more downscaling */
4140 				dst.height = src.height *
4141 						dst.width / src.width;
4142 			}
4143 		} else if (rmx_type == RMX_CENTER) {
4144 			dst = src;
4145 		}
4146 
4147 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4148 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4149 
4150 		if (dm_state->underscan_enable) {
4151 			dst.x += dm_state->underscan_hborder / 2;
4152 			dst.y += dm_state->underscan_vborder / 2;
4153 			dst.width -= dm_state->underscan_hborder;
4154 			dst.height -= dm_state->underscan_vborder;
4155 		}
4156 	}
4157 
4158 	stream->src = src;
4159 	stream->dst = dst;
4160 
4161 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4162 			dst.x, dst.y, dst.width, dst.height);
4163 
4164 }
4165 
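/*
 * Pick the stream color depth: start from the sink's EDID bpc (or the
 * HDMI 2.0 YCbCr 4:2:0 deep-color caps), then clamp to the bpc the
 * user requested via max_bpc, rounded down to an even value.
 */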
4166 static enum dc_color_depth
4167 convert_color_depth_from_display_info(const struct drm_connector *connector,
4168 				      bool is_y420, int requested_bpc)
4169 {
4170 	uint8_t bpc;
4171 
4172 	if (is_y420) {
4173 		bpc = 8;
4174 
4175 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4176 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4177 			bpc = 16;
4178 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4179 			bpc = 12;
4180 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4181 			bpc = 10;
4182 	} else {
4183 		bpc = (uint8_t)connector->display_info.bpc;
4184 		/* Assume 8 bpc by default if no bpc is specified. */
4185 		bpc = bpc ? bpc : 8;
4186 	}
4187 
4188 	if (requested_bpc > 0) {
4189 		/*
4190 		 * Cap display bpc based on the user requested value.
4191 		 *
4192 		 * The value for state->max_bpc may not be correctly updated
4193 		 * depending on when the connector gets added to the state
4194 		 * or if this was called outside of atomic check, so it
4195 		 * can't be used directly.
4196 		 */
4197 		bpc = min_t(u8, bpc, requested_bpc);
4198 
4199 		/* Round down to the nearest even number. */
4200 		bpc = bpc - (bpc & 1);
4201 	}
4202 
4203 	switch (bpc) {
4204 	case 0:
4205 		/*
4206 		 * Temporary workaround: DRM doesn't parse color depth for
4207 		 * EDID revisions before 1.4.
4208 		 * TODO: Fix edid parsing
4209 		 */
4210 		return COLOR_DEPTH_888;
4211 	case 6:
4212 		return COLOR_DEPTH_666;
4213 	case 8:
4214 		return COLOR_DEPTH_888;
4215 	case 10:
4216 		return COLOR_DEPTH_101010;
4217 	case 12:
4218 		return COLOR_DEPTH_121212;
4219 	case 14:
4220 		return COLOR_DEPTH_141414;
4221 	case 16:
4222 		return COLOR_DEPTH_161616;
4223 	default:
4224 		return COLOR_DEPTH_UNDEFINED;
4225 	}
4226 }
4227 
4228 static enum dc_aspect_ratio
4229 get_aspect_ratio(const struct drm_display_mode *mode_in)
4230 {
4231 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4232 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4233 }
4234 
4235 static enum dc_color_space
4236 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4237 {
4238 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4239 
4240 	switch (dc_crtc_timing->pixel_encoding)	{
4241 	case PIXEL_ENCODING_YCBCR422:
4242 	case PIXEL_ENCODING_YCBCR444:
4243 	case PIXEL_ENCODING_YCBCR420:
4244 	{
4245 		/*
4246 		 * 27.03 MHz is the separation point between HDTV and SDTV
4247 		 * per the HDMI spec; use YCbCr709 above it and YCbCr601
4248 		 * below it.
4249 		 */
4250 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4251 			if (dc_crtc_timing->flags.Y_ONLY)
4252 				color_space =
4253 					COLOR_SPACE_YCBCR709_LIMITED;
4254 			else
4255 				color_space = COLOR_SPACE_YCBCR709;
4256 		} else {
4257 			if (dc_crtc_timing->flags.Y_ONLY)
4258 				color_space =
4259 					COLOR_SPACE_YCBCR601_LIMITED;
4260 			else
4261 				color_space = COLOR_SPACE_YCBCR601;
4262 		}
4263 
4264 	}
4265 	break;
4266 	case PIXEL_ENCODING_RGB:
4267 		color_space = COLOR_SPACE_SRGB;
4268 		break;
4269 
4270 	default:
4271 		WARN_ON(1);
4272 		break;
4273 	}
4274 
4275 	return color_space;
4276 }
4277 
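/*
 * Step the color depth down until the depth-scaled pixel clock fits
 * the sink's max TMDS clock, e.g. 10-bit scales the clock by 30/24.
 * Returns false if no HDMI-valid depth fits.
 */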
4278 static bool adjust_colour_depth_from_display_info(
4279 	struct dc_crtc_timing *timing_out,
4280 	const struct drm_display_info *info)
4281 {
4282 	enum dc_color_depth depth = timing_out->display_color_depth;
4283 	int normalized_clk;
4284 	do {
4285 		normalized_clk = timing_out->pix_clk_100hz / 10;
4286 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4287 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4288 			normalized_clk /= 2;
4289 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
4290 		switch (depth) {
4291 		case COLOR_DEPTH_888:
4292 			break;
4293 		case COLOR_DEPTH_101010:
4294 			normalized_clk = (normalized_clk * 30) / 24;
4295 			break;
4296 		case COLOR_DEPTH_121212:
4297 			normalized_clk = (normalized_clk * 36) / 24;
4298 			break;
4299 		case COLOR_DEPTH_161616:
4300 			normalized_clk = (normalized_clk * 48) / 24;
4301 			break;
4302 		default:
4303 			/* The above depths are the only ones valid for HDMI. */
4304 			return false;
4305 		}
4306 		if (normalized_clk <= info->max_tmds_clock) {
4307 			timing_out->display_color_depth = depth;
4308 			return true;
4309 		}
4310 	} while (--depth > COLOR_DEPTH_666);
4311 	return false;
4312 }
4313 
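
/*
 * Populate the dc stream timing from a DRM display mode: pixel
 * encoding (RGB vs YCbCr, honoring 4:2:0-only modes), color depth,
 * CEA/HDMI VICs, sync polarities and the full h/v timing, then drop
 * the depth or encoding if the result exceeds the sink's TMDS limits.
 */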
4314 static void fill_stream_properties_from_drm_display_mode(
4315 	struct dc_stream_state *stream,
4316 	const struct drm_display_mode *mode_in,
4317 	const struct drm_connector *connector,
4318 	const struct drm_connector_state *connector_state,
4319 	const struct dc_stream_state *old_stream,
4320 	int requested_bpc)
4321 {
4322 	struct dc_crtc_timing *timing_out = &stream->timing;
4323 	const struct drm_display_info *info = &connector->display_info;
4324 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4325 	struct hdmi_vendor_infoframe hv_frame;
4326 	struct hdmi_avi_infoframe avi_frame;
4327 
4328 	memset(&hv_frame, 0, sizeof(hv_frame));
4329 	memset(&avi_frame, 0, sizeof(avi_frame));
4330 
4331 	timing_out->h_border_left = 0;
4332 	timing_out->h_border_right = 0;
4333 	timing_out->v_border_top = 0;
4334 	timing_out->v_border_bottom = 0;
4335 	/* TODO: un-hardcode */
4336 	if (drm_mode_is_420_only(info, mode_in)
4337 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4338 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4339 	else if (drm_mode_is_420_also(info, mode_in)
4340 			&& aconnector->force_yuv420_output)
4341 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4342 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4343 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4344 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4345 	else
4346 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4347 
4348 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4349 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4350 		connector,
4351 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4352 		requested_bpc);
4353 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4354 	timing_out->hdmi_vic = 0;
4355 
4356 	if (old_stream) {
4357 		timing_out->vic = old_stream->timing.vic;
4358 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4359 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4360 	} else {
4361 		timing_out->vic = drm_match_cea_mode(mode_in);
4362 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4363 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4364 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4365 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4366 	}
4367 
4368 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4369 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4370 		timing_out->vic = avi_frame.video_code;
4371 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4372 		timing_out->hdmi_vic = hv_frame.vic;
4373 	}
4374 
4375 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4376 	timing_out->h_total = mode_in->crtc_htotal;
4377 	timing_out->h_sync_width =
4378 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4379 	timing_out->h_front_porch =
4380 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4381 	timing_out->v_total = mode_in->crtc_vtotal;
4382 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4383 	timing_out->v_front_porch =
4384 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4385 	timing_out->v_sync_width =
4386 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4387 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4388 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4389 
4390 	stream->output_color_space = get_output_color_space(timing_out);
4391 
4392 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4393 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4394 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4395 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4396 		    drm_mode_is_420_also(info, mode_in) &&
4397 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4398 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4399 			adjust_colour_depth_from_display_info(timing_out, info);
4400 		}
4401 	}
4402 }
4403 
4404 static void fill_audio_info(struct audio_info *audio_info,
4405 			    const struct drm_connector *drm_connector,
4406 			    const struct dc_sink *dc_sink)
4407 {
4408 	int i = 0;
4409 	int cea_revision = 0;
4410 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4411 
4412 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4413 	audio_info->product_id = edid_caps->product_id;
4414 
4415 	cea_revision = drm_connector->display_info.cea_rev;
4416 
4417 	strscpy(audio_info->display_name,
4418 		edid_caps->display_name,
4419 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4420 
4421 	if (cea_revision >= 3) {
4422 		audio_info->mode_count = edid_caps->audio_mode_count;
4423 
4424 		for (i = 0; i < audio_info->mode_count; ++i) {
4425 			audio_info->modes[i].format_code =
4426 					(enum audio_format_code)
4427 					(edid_caps->audio_modes[i].format_code);
4428 			audio_info->modes[i].channel_count =
4429 					edid_caps->audio_modes[i].channel_count;
4430 			audio_info->modes[i].sample_rates.all =
4431 					edid_caps->audio_modes[i].sample_rate;
4432 			audio_info->modes[i].sample_size =
4433 					edid_caps->audio_modes[i].sample_size;
4434 		}
4435 	}
4436 
4437 	audio_info->flags.all = edid_caps->speaker_flags;
4438 
4439 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4440 	if (drm_connector->latency_present[0]) {
4441 		audio_info->video_latency = drm_connector->video_latency[0];
4442 		audio_info->audio_latency = drm_connector->audio_latency[0];
4443 	}
4444 
4445 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4446 
4447 }
4448 
4449 static void
4450 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4451 				      struct drm_display_mode *dst_mode)
4452 {
4453 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4454 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4455 	dst_mode->crtc_clock = src_mode->crtc_clock;
4456 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4457 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4458 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4459 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4460 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4461 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4462 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4463 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4464 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4465 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4466 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4467 }
4468 
4469 static void
4470 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4471 					const struct drm_display_mode *native_mode,
4472 					bool scale_enabled)
4473 {
4474 	if (scale_enabled) {
4475 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4476 	} else if (native_mode->clock == drm_mode->clock &&
4477 			native_mode->htotal == drm_mode->htotal &&
4478 			native_mode->vtotal == drm_mode->vtotal) {
4479 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4480 	} else {
4481 		/* no scaling nor amdgpu inserted, no need to patch */
4482 		/* neither scaling nor an amdgpu-inserted mode: nothing to patch */
4483 }
4484 
4485 static struct dc_sink *
4486 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4487 {
4488 	struct dc_sink_init_data sink_init_data = { 0 };
4489 	struct dc_sink *sink = NULL;
4490 	sink_init_data.link = aconnector->dc_link;
4491 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4492 
4493 	sink = dc_sink_create(&sink_init_data);
4494 	if (!sink) {
4495 		DRM_ERROR("Failed to create sink!\n");
4496 		return NULL;
4497 	}
4498 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4499 
4500 	return sink;
4501 }
4502 
4503 static void set_multisync_trigger_params(
4504 		struct dc_stream_state *stream)
4505 {
4506 	if (stream->triggered_crtc_reset.enabled) {
4507 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4508 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4509 	}
4510 }
4511 
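
/*
 * Among the trigger-enabled streams, pick the one with the highest
 * refresh rate as the master that the other CRTCs reset against.
 */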
4512 static void set_master_stream(struct dc_stream_state *stream_set[],
4513 			      int stream_count)
4514 {
4515 	int j, highest_rfr = 0, master_stream = 0;
4516 
4517 	for (j = 0;  j < stream_count; j++) {
4518 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4519 			int refresh_rate = 0;
4520 
4521 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4522 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4523 			if (refresh_rate > highest_rfr) {
4524 				highest_rfr = refresh_rate;
4525 				master_stream = j;
4526 			}
4527 		}
4528 	}
4529 	for (j = 0;  j < stream_count; j++) {
4530 		if (stream_set[j])
4531 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4532 	}
4533 }
4534 
4535 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4536 {
4537 	int i = 0;
4538 
4539 	if (context->stream_count < 2)
4540 		return;
4541 	for (i = 0; i < context->stream_count ; i++) {
4542 		if (!context->streams[i])
4543 			continue;
4544 		/*
4545 		 * TODO: add a function to read AMD VSDB bits and set
4546 		 * crtc_sync_master.multi_sync_enabled flag
4547 		 * For now it's set to false
4548 		 */
4549 		set_multisync_trigger_params(context->streams[i]);
4550 	}
4551 	set_master_stream(context->streams, context->stream_count);
4552 }
4553 
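/*
 * Build a dc_stream for a connector and mode: pick a (possibly fake)
 * sink, derive the timing and color properties, enable DSC on DP when
 * the DPCD caps and link bandwidth call for it, and fill in the audio
 * and infopacket state.
 */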
4554 static struct dc_stream_state *
4555 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4556 		       const struct drm_display_mode *drm_mode,
4557 		       const struct dm_connector_state *dm_state,
4558 		       const struct dc_stream_state *old_stream,
4559 		       int requested_bpc)
4560 {
4561 	struct drm_display_mode *preferred_mode = NULL;
4562 	struct drm_connector *drm_connector;
4563 	const struct drm_connector_state *con_state =
4564 		dm_state ? &dm_state->base : NULL;
4565 	struct dc_stream_state *stream = NULL;
4566 	struct drm_display_mode mode = *drm_mode;
4567 	bool native_mode_found = false;
4568 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4569 	int mode_refresh;
4570 	int preferred_refresh = 0;
4571 #if defined(CONFIG_DRM_AMD_DC_DCN)
4572 	struct dsc_dec_dpcd_caps dsc_caps;
4573 #endif
4574 	uint32_t link_bandwidth_kbps;
4575 	struct dc_sink *sink = NULL;
4576 
4577 	if (aconnector == NULL) {
4578 		DRM_ERROR("aconnector is NULL!\n");
4579 		return stream;
4580 	}
4581 
4582 	drm_connector = &aconnector->base;
4583 
4584 	if (!aconnector->dc_sink) {
4585 		sink = create_fake_sink(aconnector);
4586 		if (!sink)
4587 			return stream;
4588 	} else {
4589 		sink = aconnector->dc_sink;
4590 		dc_sink_retain(sink);
4591 	}
4592 
4593 	stream = dc_create_stream_for_sink(sink);
4594 
4595 	if (stream == NULL) {
4596 		DRM_ERROR("Failed to create stream for sink!\n");
4597 		goto finish;
4598 	}
4599 
4600 	stream->dm_stream_context = aconnector;
4601 
4602 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4603 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4604 
4605 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4606 		/* Search for preferred mode */
4607 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4608 			native_mode_found = true;
4609 			break;
4610 		}
4611 	}
4612 	if (!native_mode_found)
4613 		preferred_mode = list_first_entry_or_null(
4614 				&aconnector->base.modes,
4615 				struct drm_display_mode,
4616 				head);
4617 
4618 	mode_refresh = drm_mode_vrefresh(&mode);
4619 
4620 	if (preferred_mode == NULL) {
4621 		/*
4622 		 * This may not be an error, the use case is when we have no
4623 		 * usermode calls to reset and set mode upon hotplug. In this
4624 		 * case, we call set mode ourselves to restore the previous mode
4625 		 * and the mode list may not have been filled in yet.
4626 		 */
4627 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4628 	} else {
4629 		decide_crtc_timing_for_drm_display_mode(
4630 				&mode, preferred_mode,
4631 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4632 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4633 	}
4634 
4635 	if (!dm_state)
4636 		drm_mode_set_crtcinfo(&mode, 0);
4637 
4638 	/*
4639 	 * If scaling is enabled and the refresh rate didn't change,
4640 	 * copy the VIC and polarities from the old timings.
4641 	 */
4642 	if (!scale || mode_refresh != preferred_refresh)
4643 		fill_stream_properties_from_drm_display_mode(stream,
4644 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4645 	else
4646 		fill_stream_properties_from_drm_display_mode(stream,
4647 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4648 
4649 	stream->timing.flags.DSC = 0;
4650 
4651 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4652 #if defined(CONFIG_DRM_AMD_DC_DCN)
4653 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4654 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4655 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4656 				      &dsc_caps);
4657 #endif
4658 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4659 							     dc_link_get_link_cap(aconnector->dc_link));
4660 
4661 #if defined(CONFIG_DRM_AMD_DC_DCN)
4662 		if (dsc_caps.is_dsc_supported)
4663 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4664 						  &dsc_caps,
4665 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4666 						  link_bandwidth_kbps,
4667 						  &stream->timing,
4668 						  &stream->timing.dsc_cfg))
4669 				stream->timing.flags.DSC = 1;
4670 #endif
4671 	}
4672 
4673 	update_stream_scaling_settings(&mode, dm_state, stream);
4674 
4675 	fill_audio_info(
4676 		&stream->audio_info,
4677 		drm_connector,
4678 		sink);
4679 
4680 	update_stream_signal(stream, sink);
4681 
4682 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4683 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4684 	if (stream->link->psr_settings.psr_feature_enabled) {
4685 		/*
4686 		 * Decide whether the stream supports VSC SDP colorimetry
4687 		 * before building the VSC info packet.
4688 		 */
4689 		stream->use_vsc_sdp_for_colorimetry = false;
4690 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4691 			stream->use_vsc_sdp_for_colorimetry =
4692 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4693 		} else {
4694 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4695 				stream->use_vsc_sdp_for_colorimetry = true;
4696 		}
4697 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4698 	}
4699 finish:
4700 	dc_sink_release(sink);
4701 
4702 	return stream;
4703 }
4704 
4705 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4706 {
4707 	drm_crtc_cleanup(crtc);
4708 	kfree(crtc);
4709 }
4710 
4711 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4712 				  struct drm_crtc_state *state)
4713 {
4714 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4715 
4716 	/* TODO: destroy dc_stream objects once the stream object is flattened */
4717 	if (cur->stream)
4718 		dc_stream_release(cur->stream);
4719 
4720 
4721 	__drm_atomic_helper_crtc_destroy_state(state);
4722 
4723 
4724 	kfree(state);
4725 }
4726 
4727 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4728 {
4729 	struct dm_crtc_state *state;
4730 
4731 	if (crtc->state)
4732 		dm_crtc_destroy_state(crtc, crtc->state);
4733 
4734 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4735 	if (WARN_ON(!state))
4736 		return;
4737 
4738 	crtc->state = &state->base;
4739 	crtc->state->crtc = crtc;
4740 
4741 }
4742 
4743 static struct drm_crtc_state *
4744 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4745 {
4746 	struct dm_crtc_state *state, *cur;
4747 
4748 	if (WARN_ON(!crtc->state))
4749 		return NULL;
4750 
4751 	cur = to_dm_crtc_state(crtc->state);
4752 
4753 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4754 	if (!state)
4755 		return NULL;
4756 
4757 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4758 
4759 	if (cur->stream) {
4760 		state->stream = cur->stream;
4761 		dc_stream_retain(state->stream);
4762 	}
4763 
4764 	state->active_planes = cur->active_planes;
4765 	state->vrr_params = cur->vrr_params;
4766 	state->vrr_infopacket = cur->vrr_infopacket;
4767 	state->abm_level = cur->abm_level;
4768 	state->vrr_supported = cur->vrr_supported;
4769 	state->freesync_config = cur->freesync_config;
4770 	state->crc_src = cur->crc_src;
4771 	state->cm_has_degamma = cur->cm_has_degamma;
4772 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4773 
4774 	/* TODO: duplicate dc_stream once the stream object is flattened */
4775 
4776 	return &state->base;
4777 }
4778 
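/*
 * VUPDATE interrupts are only needed while vblank is on and VRR is
 * active; dm_set_vblank() below toggles the two together accordingly.
 */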
4779 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4780 {
4781 	enum dc_irq_source irq_source;
4782 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4783 	struct amdgpu_device *adev = crtc->dev->dev_private;
4784 	int rc;
4785 
4786 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4787 
4788 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4789 
4790 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4791 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4792 	return rc;
4793 }
4794 
4795 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4796 {
4797 	enum dc_irq_source irq_source;
4798 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4799 	struct amdgpu_device *adev = crtc->dev->dev_private;
4800 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4801 	int rc = 0;
4802 
4803 	if (enable) {
4804 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4805 		if (amdgpu_dm_vrr_active(acrtc_state))
4806 			rc = dm_set_vupdate_irq(crtc, true);
4807 	} else {
4808 		/* vblank irq off -> vupdate irq off */
4809 		rc = dm_set_vupdate_irq(crtc, false);
4810 	}
4811 
4812 	if (rc)
4813 		return rc;
4814 
4815 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4816 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4817 }
4818 
4819 static int dm_enable_vblank(struct drm_crtc *crtc)
4820 {
4821 	return dm_set_vblank(crtc, true);
4822 }
4823 
4824 static void dm_disable_vblank(struct drm_crtc *crtc)
4825 {
4826 	dm_set_vblank(crtc, false);
4827 }
4828 
4829 /* Implement only the options currently available for the driver */
4830 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4831 	.reset = dm_crtc_reset_state,
4832 	.destroy = amdgpu_dm_crtc_destroy,
4833 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4834 	.set_config = drm_atomic_helper_set_config,
4835 	.page_flip = drm_atomic_helper_page_flip,
4836 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4837 	.atomic_destroy_state = dm_crtc_destroy_state,
4838 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4839 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4840 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4841 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4842 	.enable_vblank = dm_enable_vblank,
4843 	.disable_vblank = dm_disable_vblank,
4844 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4845 };
4846 
4847 static enum drm_connector_status
4848 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4849 {
4850 	bool connected;
4851 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4852 
4853 	/*
4854 	 * Notes:
4855 	 * 1. This interface is NOT called in context of HPD irq.
4856 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4857 	 * makes it a bad place for *any* MST-related activity.
4858 	 */
4859 
4860 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4861 	    !aconnector->fake_enable)
4862 		connected = (aconnector->dc_sink != NULL);
4863 	else
4864 		connected = (aconnector->base.force == DRM_FORCE_ON);
4865 
4866 	return (connected ? connector_status_connected :
4867 			connector_status_disconnected);
4868 }
4869 
4870 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4871 					    struct drm_connector_state *connector_state,
4872 					    struct drm_property *property,
4873 					    uint64_t val)
4874 {
4875 	struct drm_device *dev = connector->dev;
4876 	struct amdgpu_device *adev = dev->dev_private;
4877 	struct dm_connector_state *dm_old_state =
4878 		to_dm_connector_state(connector->state);
4879 	struct dm_connector_state *dm_new_state =
4880 		to_dm_connector_state(connector_state);
4881 
4882 	int ret = -EINVAL;
4883 
4884 	if (property == dev->mode_config.scaling_mode_property) {
4885 		enum amdgpu_rmx_type rmx_type;
4886 
4887 		switch (val) {
4888 		case DRM_MODE_SCALE_CENTER:
4889 			rmx_type = RMX_CENTER;
4890 			break;
4891 		case DRM_MODE_SCALE_ASPECT:
4892 			rmx_type = RMX_ASPECT;
4893 			break;
4894 		case DRM_MODE_SCALE_FULLSCREEN:
4895 			rmx_type = RMX_FULL;
4896 			break;
4897 		case DRM_MODE_SCALE_NONE:
4898 		default:
4899 			rmx_type = RMX_OFF;
4900 			break;
4901 		}
4902 
4903 		if (dm_old_state->scaling == rmx_type)
4904 			return 0;
4905 
4906 		dm_new_state->scaling = rmx_type;
4907 		ret = 0;
4908 	} else if (property == adev->mode_info.underscan_hborder_property) {
4909 		dm_new_state->underscan_hborder = val;
4910 		ret = 0;
4911 	} else if (property == adev->mode_info.underscan_vborder_property) {
4912 		dm_new_state->underscan_vborder = val;
4913 		ret = 0;
4914 	} else if (property == adev->mode_info.underscan_property) {
4915 		dm_new_state->underscan_enable = val;
4916 		ret = 0;
4917 	} else if (property == adev->mode_info.abm_level_property) {
4918 		dm_new_state->abm_level = val;
4919 		ret = 0;
4920 	}
4921 
4922 	return ret;
4923 }
4924 
4925 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4926 					    const struct drm_connector_state *state,
4927 					    struct drm_property *property,
4928 					    uint64_t *val)
4929 {
4930 	struct drm_device *dev = connector->dev;
4931 	struct amdgpu_device *adev = dev->dev_private;
4932 	struct dm_connector_state *dm_state =
4933 		to_dm_connector_state(state);
4934 	int ret = -EINVAL;
4935 
4936 	if (property == dev->mode_config.scaling_mode_property) {
4937 		switch (dm_state->scaling) {
4938 		case RMX_CENTER:
4939 			*val = DRM_MODE_SCALE_CENTER;
4940 			break;
4941 		case RMX_ASPECT:
4942 			*val = DRM_MODE_SCALE_ASPECT;
4943 			break;
4944 		case RMX_FULL:
4945 			*val = DRM_MODE_SCALE_FULLSCREEN;
4946 			break;
4947 		case RMX_OFF:
4948 		default:
4949 			*val = DRM_MODE_SCALE_NONE;
4950 			break;
4951 		}
4952 		ret = 0;
4953 	} else if (property == adev->mode_info.underscan_hborder_property) {
4954 		*val = dm_state->underscan_hborder;
4955 		ret = 0;
4956 	} else if (property == adev->mode_info.underscan_vborder_property) {
4957 		*val = dm_state->underscan_vborder;
4958 		ret = 0;
4959 	} else if (property == adev->mode_info.underscan_property) {
4960 		*val = dm_state->underscan_enable;
4961 		ret = 0;
4962 	} else if (property == adev->mode_info.abm_level_property) {
4963 		*val = dm_state->abm_level;
4964 		ret = 0;
4965 	}
4966 
4967 	return ret;
4968 }
4969 
4970 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4971 {
4972 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4973 
4974 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4975 }
4976 
4977 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4978 {
4979 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4980 	const struct dc_link *link = aconnector->dc_link;
4981 	struct amdgpu_device *adev = connector->dev->dev_private;
4982 	struct amdgpu_display_manager *dm = &adev->dm;
4983 
4984 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4985 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4986 
4987 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4988 	    link->type != dc_connection_none &&
4989 	    dm->backlight_dev) {
4990 		backlight_device_unregister(dm->backlight_dev);
4991 		dm->backlight_dev = NULL;
4992 	}
4993 #endif
4994 
4995 	if (aconnector->dc_em_sink)
4996 		dc_sink_release(aconnector->dc_em_sink);
4997 	aconnector->dc_em_sink = NULL;
4998 	if (aconnector->dc_sink)
4999 		dc_sink_release(aconnector->dc_sink);
5000 	aconnector->dc_sink = NULL;
5001 
5002 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5003 	drm_connector_unregister(connector);
5004 	drm_connector_cleanup(connector);
5005 	if (aconnector->i2c) {
5006 		i2c_del_adapter(&aconnector->i2c->base);
5007 		kfree(aconnector->i2c);
5008 	}
5009 	kfree(aconnector->dm_dp_aux.aux.name);
5010 
5011 	kfree(connector);
5012 }
5013 
5014 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5015 {
5016 	struct dm_connector_state *state =
5017 		to_dm_connector_state(connector->state);
5018 
5019 	if (connector->state)
5020 		__drm_atomic_helper_connector_destroy_state(connector->state);
5021 
5022 	kfree(state);
5023 
5024 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5025 
5026 	if (state) {
5027 		state->scaling = RMX_OFF;
5028 		state->underscan_enable = false;
5029 		state->underscan_hborder = 0;
5030 		state->underscan_vborder = 0;
5031 		state->base.max_requested_bpc = 8;
5032 		state->vcpi_slots = 0;
5033 		state->pbn = 0;
5034 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5035 			state->abm_level = amdgpu_dm_abm_level;
5036 
5037 		__drm_atomic_helper_connector_reset(connector, &state->base);
5038 	}
5039 }
5040 
5041 struct drm_connector_state *
5042 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5043 {
5044 	struct dm_connector_state *state =
5045 		to_dm_connector_state(connector->state);
5046 
5047 	struct dm_connector_state *new_state =
5048 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5049 
5050 	if (!new_state)
5051 		return NULL;
5052 
5053 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5054 
5055 	new_state->freesync_capable = state->freesync_capable;
5056 	new_state->abm_level = state->abm_level;
5057 	new_state->scaling = state->scaling;
5058 	new_state->underscan_enable = state->underscan_enable;
5059 	new_state->underscan_hborder = state->underscan_hborder;
5060 	new_state->underscan_vborder = state->underscan_vborder;
5061 	new_state->vcpi_slots = state->vcpi_slots;
5062 	new_state->pbn = state->pbn;
5063 	return &new_state->base;
5064 }
5065 
5066 static int
5067 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5068 {
5069 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5070 		to_amdgpu_dm_connector(connector);
5071 	int r;
5072 
5073 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5074 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5075 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5076 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5077 		if (r)
5078 			return r;
5079 	}
5080 
5081 #if defined(CONFIG_DEBUG_FS)
5082 	connector_debugfs_init(amdgpu_dm_connector);
5083 #endif
5084 
5085 	return 0;
5086 }
5087 
5088 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5089 	.reset = amdgpu_dm_connector_funcs_reset,
5090 	.detect = amdgpu_dm_connector_detect,
5091 	.fill_modes = drm_helper_probe_single_connector_modes,
5092 	.destroy = amdgpu_dm_connector_destroy,
5093 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5094 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5095 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5096 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5097 	.late_register = amdgpu_dm_connector_late_register,
5098 	.early_unregister = amdgpu_dm_connector_unregister
5099 };
5100 
5101 static int get_modes(struct drm_connector *connector)
5102 {
5103 	return amdgpu_dm_connector_get_modes(connector);
5104 }
5105 
5106 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5107 {
5108 	struct dc_sink_init_data init_params = {
5109 			.link = aconnector->dc_link,
5110 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5111 	};
5112 	struct edid *edid;
5113 
5114 	if (!aconnector->base.edid_blob_ptr) {
5115 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5116 				aconnector->base.name);
5117 
5118 		aconnector->base.force = DRM_FORCE_OFF;
5119 		aconnector->base.override_edid = false;
5120 		return;
5121 	}
5122 
5123 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5124 
5125 	aconnector->edid = edid;
5126 
5127 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5128 		aconnector->dc_link,
5129 		(uint8_t *)edid,
5130 		(edid->extensions + 1) * EDID_LENGTH,
5131 		&init_params);
5132 
5133 	if (aconnector->base.force == DRM_FORCE_ON) {
5134 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5135 		aconnector->dc_link->local_sink :
5136 		aconnector->dc_em_sink;
5137 		dc_sink_retain(aconnector->dc_sink);
5138 	}
5139 }
5140 
5141 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5142 {
5143 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5144 
5145 	/*
5146 	 * In case of a headless boot with force-on for a DP managed connector,
5147 	 * these settings have to be != 0 to get an initial modeset.
5148 	 */
5149 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5150 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5151 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5152 	}
5153 
5155 	aconnector->base.override_edid = true;
5156 	create_eml_sink(aconnector);
5157 }
5158 
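/*
 * Wrap create_stream_for_sink() in a bpc fallback loop: start from the
 * connector's max_requested_bpc and retry at bpc - 2 (down to 6) until
 * dc_validate_stream() accepts the result.
 */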
5159 static struct dc_stream_state *
5160 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5161 				const struct drm_display_mode *drm_mode,
5162 				const struct dm_connector_state *dm_state,
5163 				const struct dc_stream_state *old_stream)
5164 {
5165 	struct drm_connector *connector = &aconnector->base;
5166 	struct amdgpu_device *adev = connector->dev->dev_private;
5167 	struct dc_stream_state *stream;
5168 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5169 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5170 	enum dc_status dc_result = DC_OK;
5171 
5172 	do {
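	/*
	 * Retry with progressively lower color depth: start at the
	 * requested bpc and step down by 2 to a floor of 6 until DC
	 * accepts the stream.
	 */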
5173 		stream = create_stream_for_sink(aconnector, drm_mode,
5174 						dm_state, old_stream,
5175 						requested_bpc);
5176 		if (stream == NULL) {
5177 			DRM_ERROR("Failed to create stream for sink!\n");
5178 			break;
5179 		}
5180 
5181 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5182 
5183 		if (dc_result != DC_OK) {
5184 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5185 				      drm_mode->hdisplay,
5186 				      drm_mode->vdisplay,
5187 				      drm_mode->clock,
5188 				      dc_result,
5189 				      dc_status_to_str(dc_result));
5190 
5191 			dc_stream_release(stream);
5192 			stream = NULL;
5193 			requested_bpc -= 2; /* lower bpc to retry validation */
5194 		}
5195 
5196 	} while (stream == NULL && requested_bpc >= 6);
5197 
5198 	return stream;
5199 }
5200 
5201 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5202 				   struct drm_display_mode *mode)
5203 {
5204 	int result = MODE_ERROR;
5205 	struct dc_sink *dc_sink;
5206 	/* TODO: Unhardcode stream count */
5207 	struct dc_stream_state *stream;
5208 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5209 
5210 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5211 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5212 		return result;
5213 
5214 	/*
5215 	 * Only run this the first time mode_valid is called, to initialize
5216 	 * EDID management.
5217 	 */
5218 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5219 		!aconnector->dc_em_sink)
5220 		handle_edid_mgmt(aconnector);
5221 
5222 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5223 
5224 	if (dc_sink == NULL) {
5225 		DRM_ERROR("dc_sink is NULL!\n");
5226 		goto fail;
5227 	}
5228 
5229 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5230 	if (stream) {
5231 		dc_stream_release(stream);
5232 		result = MODE_OK;
5233 	}
5234 
5235 fail:
5236 	/* TODO: error handling */
5237 	return result;
5238 }
5239 
5240 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5241 				struct dc_info_packet *out)
5242 {
5243 	struct hdmi_drm_infoframe frame;
5244 	unsigned char buf[30]; /* 26 + 4 */
5245 	ssize_t len;
5246 	int ret, i;
5247 
5248 	memset(out, 0, sizeof(*out));
5249 
5250 	if (!state->hdr_output_metadata)
5251 		return 0;
5252 
5253 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5254 	if (ret)
5255 		return ret;
5256 
5257 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5258 	if (len < 0)
5259 		return (int)len;
5260 
5261 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5262 	if (len != 30)
5263 		return -EINVAL;
5264 
5265 	/* Prepare the infopacket for DC. */
5266 	switch (state->connector->connector_type) {
5267 	case DRM_MODE_CONNECTOR_HDMIA:
5268 		out->hb0 = 0x87; /* type */
5269 		out->hb1 = 0x01; /* version */
5270 		out->hb2 = 0x1A; /* length */
5271 		out->sb[0] = buf[3]; /* checksum */
5272 		i = 1;
5273 		break;
5274 
5275 	case DRM_MODE_CONNECTOR_DisplayPort:
5276 	case DRM_MODE_CONNECTOR_eDP:
5277 		out->hb0 = 0x00; /* sdp id, zero */
5278 		out->hb1 = 0x87; /* type */
5279 		out->hb2 = 0x1D; /* payload len - 1 */
5280 		out->hb3 = (0x13 << 2); /* sdp version */
5281 		out->sb[0] = 0x01; /* version */
5282 		out->sb[1] = 0x1A; /* length */
5283 		i = 2;
5284 		break;
5285 
5286 	default:
5287 		return -EINVAL;
5288 	}
5289 
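	/*
	 * Copy the 26 payload bytes, skipping the 4-byte infoframe
	 * header (type, version, length, checksum).
	 */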
5290 	memcpy(&out->sb[i], &buf[4], 26);
5291 	out->valid = true;
5292 
5293 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5294 		       sizeof(out->sb), false);
5295 
5296 	return 0;
5297 }
5298 
5299 static bool
5300 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5301 			  const struct drm_connector_state *new_state)
5302 {
5303 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5304 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5305 
5306 	if (old_blob != new_blob) {
5307 		if (old_blob && new_blob &&
5308 		    old_blob->length == new_blob->length)
5309 			return memcmp(old_blob->data, new_blob->data,
5310 				      old_blob->length);
5311 
5312 		return true;
5313 	}
5314 
5315 	return false;
5316 }
5317 
5318 static int
5319 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5320 				 struct drm_atomic_state *state)
5321 {
5322 	struct drm_connector_state *new_con_state =
5323 		drm_atomic_get_new_connector_state(state, conn);
5324 	struct drm_connector_state *old_con_state =
5325 		drm_atomic_get_old_connector_state(state, conn);
5326 	struct drm_crtc *crtc = new_con_state->crtc;
5327 	struct drm_crtc_state *new_crtc_state;
5328 	int ret;
5329 
5330 	if (!crtc)
5331 		return 0;
5332 
5333 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5334 		struct dc_info_packet hdr_infopacket;
5335 
5336 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5337 		if (ret)
5338 			return ret;
5339 
5340 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5341 		if (IS_ERR(new_crtc_state))
5342 			return PTR_ERR(new_crtc_state);
5343 
5344 		/*
5345 		 * DC considers the stream backends changed if the
5346 		 * static metadata changes. Forcing the modeset also
5347 		 * gives a simple way for userspace to switch from
5348 		 * 8bpc to 10bpc when setting the metadata to enter
5349 		 * or exit HDR.
5350 		 *
5351 		 * Changing the static metadata after it's been
5352 		 * set is permissible, however. So only force a
5353 		 * modeset if we're entering or exiting HDR.
5354 		 */
5355 		new_crtc_state->mode_changed =
5356 			!old_con_state->hdr_output_metadata ||
5357 			!new_con_state->hdr_output_metadata;
5358 	}
5359 
5360 	return 0;
5361 }
5362 
5363 static const struct drm_connector_helper_funcs
5364 amdgpu_dm_connector_helper_funcs = {
5365 	/*
5366 	 * If hotplugging a second, bigger display in FB console mode, bigger-resolution
5367 	 * modes will be filtered out by drm_mode_validate_size(), and those modes
5368 	 * are missing after the user starts lightdm. So we need to rebuild the
5369 	 * mode list in the get_modes callback, not just return the mode count.
5370 	 */
5371 	.get_modes = get_modes,
5372 	.mode_valid = amdgpu_dm_connector_mode_valid,
5373 	.atomic_check = amdgpu_dm_connector_atomic_check,
5374 };
5375 
5376 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5377 {
5378 }
5379 
5380 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5381 {
5382 	struct drm_device *dev = new_crtc_state->crtc->dev;
5383 	struct drm_plane *plane;
5384 
5385 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5386 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5387 			return true;
5388 	}
5389 
5390 	return false;
5391 }
5392 
5393 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5394 {
5395 	struct drm_atomic_state *state = new_crtc_state->state;
5396 	struct drm_plane *plane;
5397 	int num_active = 0;
5398 
5399 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5400 		struct drm_plane_state *new_plane_state;
5401 
5402 		/* Cursor planes are "fake". */
5403 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5404 			continue;
5405 
5406 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5407 
5408 		if (!new_plane_state) {
5409 			/*
5410 			 * The plane is enabled on the CRTC and hasn't changed
5411 			 * state. This means that it previously passed
5412 			 * validation and is therefore enabled.
5413 			 */
5414 			num_active += 1;
5415 			continue;
5416 		}
5417 
5418 		/* We need a framebuffer to be considered enabled. */
5419 		num_active += (new_plane_state->fb != NULL);
5420 	}
5421 
5422 	return num_active;
5423 }
5424 
5425 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5426 					 struct drm_crtc_state *new_crtc_state)
5427 {
5428 	struct dm_crtc_state *dm_new_crtc_state =
5429 		to_dm_crtc_state(new_crtc_state);
5430 
5431 	dm_new_crtc_state->active_planes = 0;
5432 
5433 	if (!dm_new_crtc_state->stream)
5434 		return;
5435 
5436 	dm_new_crtc_state->active_planes =
5437 		count_crtc_active_planes(new_crtc_state);
5438 }
5439 
5440 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5441 				       struct drm_crtc_state *state)
5442 {
5443 	struct amdgpu_device *adev = crtc->dev->dev_private;
5444 	struct dc *dc = adev->dm.dc;
5445 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5446 	int ret = -EINVAL;
5447 
5448 	dm_update_crtc_active_planes(crtc, state);
5449 
5450 	if (unlikely(!dm_crtc_state->stream &&
5451 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5452 		WARN_ON(1);
5453 		return ret;
5454 	}
5455 
5456 	/* In some use cases, like reset, no stream is attached */
5457 	if (!dm_crtc_state->stream)
5458 		return 0;
5459 
5460 	/*
5461 	 * We want at least one hardware plane enabled to use
5462 	 * the stream with a cursor enabled.
5463 	 */
5464 	if (state->enable && state->active &&
5465 	    does_crtc_have_active_cursor(state) &&
5466 	    dm_crtc_state->active_planes == 0)
5467 		return -EINVAL;
5468 
5469 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5470 		return 0;
5471 
5472 	return ret;
5473 }
5474 
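/* No fixup is done here; timing validation is handled by DC via dc_validate_stream(). */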
5475 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5476 				      const struct drm_display_mode *mode,
5477 				      struct drm_display_mode *adjusted_mode)
5478 {
5479 	return true;
5480 }
5481 
5482 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5483 	.disable = dm_crtc_helper_disable,
5484 	.atomic_check = dm_crtc_helper_atomic_check,
5485 	.mode_fixup = dm_crtc_helper_mode_fixup,
5486 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5487 };
5488 
5489 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5490 {
5491 
5492 }
5493 
5494 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5495 {
5496 	switch (display_color_depth) {
5497 	case COLOR_DEPTH_666:
5498 		return 6;
5499 	case COLOR_DEPTH_888:
5500 		return 8;
5501 	case COLOR_DEPTH_101010:
5502 		return 10;
5503 	case COLOR_DEPTH_121212:
5504 		return 12;
5505 	case COLOR_DEPTH_141414:
5506 		return 14;
5507 	case COLOR_DEPTH_161616:
5508 		return 16;
5509 	default:
5510 		break;
5511 	}
5512 	return 0;
5513 }
5514 
5515 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5516 					  struct drm_crtc_state *crtc_state,
5517 					  struct drm_connector_state *conn_state)
5518 {
5519 	struct drm_atomic_state *state = crtc_state->state;
5520 	struct drm_connector *connector = conn_state->connector;
5521 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5522 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5523 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5524 	struct drm_dp_mst_topology_mgr *mst_mgr;
5525 	struct drm_dp_mst_port *mst_port;
5526 	enum dc_color_depth color_depth;
5527 	int clock, bpp = 0;
5528 	bool is_y420 = false;
5529 
5530 	if (!aconnector->port || !aconnector->dc_sink)
5531 		return 0;
5532 
5533 	mst_port = aconnector->port;
5534 	mst_mgr = &aconnector->mst_port->mst_mgr;
5535 
5536 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5537 		return 0;
5538 
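	/*
	 * A duplicated (suspend/resume) state keeps the previously
	 * computed PBN; otherwise derive it from the adjusted mode
	 * clock and the effective bpp (3 components at the chosen bpc).
	 */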
5539 	if (!state->duplicated) {
5540 		int max_bpc = conn_state->max_requested_bpc;
5541 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5542 				aconnector->force_yuv420_output;
5543 		color_depth = convert_color_depth_from_display_info(connector,
5544 								    is_y420,
5545 								    max_bpc);
5546 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5547 		clock = adjusted_mode->clock;
5548 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5549 	}
5550 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5551 									   mst_mgr,
5552 									   mst_port,
5553 									   dm_new_connector_state->pbn,
5554 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5555 	if (dm_new_connector_state->vcpi_slots < 0) {
5556 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5557 		return dm_new_connector_state->vcpi_slots;
5558 	}
5559 	return 0;
5560 }
5561 
5562 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5563 	.disable = dm_encoder_helper_disable,
5564 	.atomic_check = dm_encoder_helper_atomic_check
5565 };
5566 
5567 #if defined(CONFIG_DRM_AMD_DC_DCN)
5568 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5569 					    struct dc_state *dc_state)
5570 {
5571 	struct dc_stream_state *stream = NULL;
5572 	struct drm_connector *connector;
5573 	struct drm_connector_state *new_con_state, *old_con_state;
5574 	struct amdgpu_dm_connector *aconnector;
5575 	struct dm_connector_state *dm_conn_state;
5576 	int i, j, clock, bpp;
5577 	int vcpi, pbn_div, pbn = 0;
5578 
5579 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5580 
5581 		aconnector = to_amdgpu_dm_connector(connector);
5582 
5583 		if (!aconnector->port)
5584 			continue;
5585 
5586 		if (!new_con_state || !new_con_state->crtc)
5587 			continue;
5588 
5589 		dm_conn_state = to_dm_connector_state(new_con_state);
5590 
5591 		for (j = 0; j < dc_state->stream_count; j++) {
5592 			stream = dc_state->streams[j];
5593 			if (!stream)
5594 				continue;
5595 
5596 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5597 				break;
5598 
5599 			stream = NULL;
5600 		}
5601 
5602 		if (!stream)
5603 			continue;
5604 
5605 		if (stream->timing.flags.DSC != 1) {
5606 			drm_dp_mst_atomic_enable_dsc(state,
5607 						     aconnector->port,
5608 						     dm_conn_state->pbn,
5609 						     0,
5610 						     false);
5611 			continue;
5612 		}
5613 
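		/*
		 * DSC is enabled: recompute the PBN from the compressed rate.
		 * dsc_cfg.bits_per_pixel is in units of 1/16 bpp, which
		 * drm_dp_calc_pbn_mode() accounts for via its DSC flag.
		 */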
5614 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5615 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5616 		clock = stream->timing.pix_clk_100hz / 10;
5617 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5618 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5619 						    aconnector->port,
5620 						    pbn, pbn_div,
5621 						    true);
5622 		if (vcpi < 0)
5623 			return vcpi;
5624 
5625 		dm_conn_state->pbn = pbn;
5626 		dm_conn_state->vcpi_slots = vcpi;
5627 	}
5628 	return 0;
5629 }
5630 #endif
5631 
5632 static void dm_drm_plane_reset(struct drm_plane *plane)
5633 {
5634 	struct dm_plane_state *amdgpu_state = NULL;
5635 
5636 	if (plane->state)
5637 		plane->funcs->atomic_destroy_state(plane, plane->state);
5638 
5639 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5640 	WARN_ON(amdgpu_state == NULL);
5641 
5642 	if (amdgpu_state)
5643 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5644 }
5645 
5646 static struct drm_plane_state *
5647 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5648 {
5649 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5650 
5651 	old_dm_plane_state = to_dm_plane_state(plane->state);
5652 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5653 	if (!dm_plane_state)
5654 		return NULL;
5655 
5656 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5657 
5658 	if (old_dm_plane_state->dc_state) {
5659 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5660 		dc_plane_state_retain(dm_plane_state->dc_state);
5661 	}
5662 
5663 	return &dm_plane_state->base;
5664 }
5665 
5666 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5667 				struct drm_plane_state *state)
5668 {
5669 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5670 
5671 	if (dm_plane_state->dc_state)
5672 		dc_plane_state_release(dm_plane_state->dc_state);
5673 
5674 	drm_atomic_helper_plane_destroy_state(plane, state);
5675 }
5676 
5677 static const struct drm_plane_funcs dm_plane_funcs = {
5678 	.update_plane	= drm_atomic_helper_update_plane,
5679 	.disable_plane	= drm_atomic_helper_disable_plane,
5680 	.destroy	= drm_primary_helper_destroy,
5681 	.reset = dm_drm_plane_reset,
5682 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5683 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5684 };
5685 
5686 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5687 				      struct drm_plane_state *new_state)
5688 {
5689 	struct amdgpu_framebuffer *afb;
5690 	struct drm_gem_object *obj;
5691 	struct amdgpu_device *adev;
5692 	struct amdgpu_bo *rbo;
5693 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5694 	struct list_head list;
5695 	struct ttm_validate_buffer tv;
5696 	struct ww_acquire_ctx ticket;
5697 	uint64_t tiling_flags;
5698 	uint32_t domain;
5699 	int r;
5700 	bool tmz_surface = false;
5701 	bool force_disable_dcc = false;
5702 
5703 	dm_plane_state_old = to_dm_plane_state(plane->state);
5704 	dm_plane_state_new = to_dm_plane_state(new_state);
5705 
5706 	if (!new_state->fb) {
5707 		DRM_DEBUG_DRIVER("No FB bound\n");
5708 		return 0;
5709 	}
5710 
5711 	afb = to_amdgpu_framebuffer(new_state->fb);
5712 	obj = new_state->fb->obj[0];
5713 	rbo = gem_to_amdgpu_bo(obj);
5714 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5715 	INIT_LIST_HEAD(&list);
5716 
5717 	tv.bo = &rbo->tbo;
5718 	tv.num_shared = 1;
5719 	list_add(&tv.head, &list);
5720 
5721 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5722 	if (r) {
5723 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5724 		return r;
5725 	}
5726 
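	/*
	 * Cursor surfaces are pinned to VRAM; other plane types may use
	 * any domain supported for display.
	 */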
5727 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5728 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5729 	else
5730 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5731 
5732 	r = amdgpu_bo_pin(rbo, domain);
5733 	if (unlikely(r != 0)) {
5734 		if (r != -ERESTARTSYS)
5735 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5736 		ttm_eu_backoff_reservation(&ticket, &list);
5737 		return r;
5738 	}
5739 
5740 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5741 	if (unlikely(r != 0)) {
5742 		amdgpu_bo_unpin(rbo);
5743 		ttm_eu_backoff_reservation(&ticket, &list);
5744 		DRM_ERROR("%p bind failed\n", rbo);
5745 		return r;
5746 	}
5747 
5748 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5749 
5750 	tmz_surface = amdgpu_bo_encrypted(rbo);
5751 
5752 	ttm_eu_backoff_reservation(&ticket, &list);
5753 
5754 	afb->address = amdgpu_bo_gpu_offset(rbo);
5755 
5756 	amdgpu_bo_ref(rbo);
5757 
5758 	if (dm_plane_state_new->dc_state &&
5759 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5760 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5761 
5762 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5763 		fill_plane_buffer_attributes(
5764 			adev, afb, plane_state->format, plane_state->rotation,
5765 			tiling_flags, &plane_state->tiling_info,
5766 			&plane_state->plane_size, &plane_state->dcc,
5767 			&plane_state->address, tmz_surface,
5768 			force_disable_dcc);
5769 	}
5770 
5771 	return 0;
5772 }
5773 
5774 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5775 				       struct drm_plane_state *old_state)
5776 {
5777 	struct amdgpu_bo *rbo;
5778 	int r;
5779 
5780 	if (!old_state->fb)
5781 		return;
5782 
5783 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5784 	r = amdgpu_bo_reserve(rbo, false);
5785 	if (unlikely(r)) {
5786 		DRM_ERROR("failed to reserve rbo before unpin\n");
5787 		return;
5788 	}
5789 
5790 	amdgpu_bo_unpin(rbo);
5791 	amdgpu_bo_unreserve(rbo);
5792 	amdgpu_bo_unref(&rbo);
5793 }
5794 
5795 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5796 				       struct drm_crtc_state *new_crtc_state)
5797 {
5798 	int max_downscale = 0;
5799 	int max_upscale = INT_MAX;
5800 
5801 	/* TODO: These should be checked against DC plane caps */
5802 	return drm_atomic_helper_check_plane_state(
5803 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5804 }
5805 
5806 static int dm_plane_atomic_check(struct drm_plane *plane,
5807 				 struct drm_plane_state *state)
5808 {
5809 	struct amdgpu_device *adev = plane->dev->dev_private;
5810 	struct dc *dc = adev->dm.dc;
5811 	struct dm_plane_state *dm_plane_state;
5812 	struct dc_scaling_info scaling_info;
5813 	struct drm_crtc_state *new_crtc_state;
5814 	int ret;
5815 
5816 	dm_plane_state = to_dm_plane_state(state);
5817 
5818 	if (!dm_plane_state->dc_state)
5819 		return 0;
5820 
5821 	new_crtc_state =
5822 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5823 	if (!new_crtc_state)
5824 		return -EINVAL;
5825 
5826 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5827 	if (ret)
5828 		return ret;
5829 
5830 	ret = fill_dc_scaling_info(state, &scaling_info);
5831 	if (ret)
5832 		return ret;
5833 
5834 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5835 		return 0;
5836 
5837 	return -EINVAL;
5838 }
5839 
5840 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5841 				       struct drm_plane_state *new_plane_state)
5842 {
5843 	/* Only support async updates on cursor planes. */
5844 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5845 		return -EINVAL;
5846 
5847 	return 0;
5848 }
5849 
5850 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5851 					 struct drm_plane_state *new_state)
5852 {
5853 	struct drm_plane_state *old_state =
5854 		drm_atomic_get_old_plane_state(new_state->state, plane);
5855 
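	/*
	 * Async (cursor) updates bypass the full atomic commit: copy the
	 * new framebuffer and position into the current state and
	 * program the cursor hardware immediately.
	 */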
5856 	swap(plane->state->fb, new_state->fb);
5857 
5858 	plane->state->src_x = new_state->src_x;
5859 	plane->state->src_y = new_state->src_y;
5860 	plane->state->src_w = new_state->src_w;
5861 	plane->state->src_h = new_state->src_h;
5862 	plane->state->crtc_x = new_state->crtc_x;
5863 	plane->state->crtc_y = new_state->crtc_y;
5864 	plane->state->crtc_w = new_state->crtc_w;
5865 	plane->state->crtc_h = new_state->crtc_h;
5866 
5867 	handle_cursor_update(plane, old_state);
5868 }
5869 
5870 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5871 	.prepare_fb = dm_plane_helper_prepare_fb,
5872 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5873 	.atomic_check = dm_plane_atomic_check,
5874 	.atomic_async_check = dm_plane_atomic_async_check,
5875 	.atomic_async_update = dm_plane_atomic_async_update
5876 };
5877 
5878 /*
5879  * TODO: these are currently initialized to rgb formats only.
5880  * For future use cases we should either initialize them dynamically based on
5881  * plane capabilities, or initialize this array to all formats, so internal drm
5882  * check will succeed, and let DC implement proper check
5883  * check will succeed, and let DC implement the proper check.
5884 static const uint32_t rgb_formats[] = {
5885 	DRM_FORMAT_XRGB8888,
5886 	DRM_FORMAT_ARGB8888,
5887 	DRM_FORMAT_RGBA8888,
5888 	DRM_FORMAT_XRGB2101010,
5889 	DRM_FORMAT_XBGR2101010,
5890 	DRM_FORMAT_ARGB2101010,
5891 	DRM_FORMAT_ABGR2101010,
5892 	DRM_FORMAT_XBGR8888,
5893 	DRM_FORMAT_ABGR8888,
5894 	DRM_FORMAT_RGB565,
5895 };
5896 
5897 static const uint32_t overlay_formats[] = {
5898 	DRM_FORMAT_XRGB8888,
5899 	DRM_FORMAT_ARGB8888,
5900 	DRM_FORMAT_RGBA8888,
5901 	DRM_FORMAT_XBGR8888,
5902 	DRM_FORMAT_ABGR8888,
5903 	DRM_FORMAT_RGB565
5904 };
5905 
5906 static const u32 cursor_formats[] = {
5907 	DRM_FORMAT_ARGB8888
5908 };
5909 
5910 static int get_plane_formats(const struct drm_plane *plane,
5911 			     const struct dc_plane_cap *plane_cap,
5912 			     uint32_t *formats, int max_formats)
5913 {
5914 	int i, num_formats = 0;
5915 
5916 	/*
5917 	 * TODO: Query support for each group of formats directly from
5918 	 * DC plane caps. This will require adding more formats to the
5919 	 * caps list.
5920 	 */
5921 
5922 	switch (plane->type) {
5923 	case DRM_PLANE_TYPE_PRIMARY:
5924 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5925 			if (num_formats >= max_formats)
5926 				break;
5927 
5928 			formats[num_formats++] = rgb_formats[i];
5929 		}
5930 
5931 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5932 			formats[num_formats++] = DRM_FORMAT_NV12;
5933 		if (plane_cap && plane_cap->pixel_format_support.p010)
5934 			formats[num_formats++] = DRM_FORMAT_P010;
5935 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5936 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5937 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5938 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5939 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5940 		}
5941 		break;
5942 
5943 	case DRM_PLANE_TYPE_OVERLAY:
5944 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5945 			if (num_formats >= max_formats)
5946 				break;
5947 
5948 			formats[num_formats++] = overlay_formats[i];
5949 		}
5950 		break;
5951 
5952 	case DRM_PLANE_TYPE_CURSOR:
5953 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5954 			if (num_formats >= max_formats)
5955 				break;
5956 
5957 			formats[num_formats++] = cursor_formats[i];
5958 		}
5959 		break;
5960 	}
5961 
5962 	return num_formats;
5963 }
5964 
5965 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5966 				struct drm_plane *plane,
5967 				unsigned long possible_crtcs,
5968 				const struct dc_plane_cap *plane_cap)
5969 {
5970 	uint32_t formats[32];
5971 	int num_formats;
5972 	int res = -EPERM;
5973 	unsigned int supported_rotations;
5974 
5975 	num_formats = get_plane_formats(plane, plane_cap, formats,
5976 					ARRAY_SIZE(formats));
5977 
5978 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5979 				       &dm_plane_funcs, formats, num_formats,
5980 				       NULL, plane->type, NULL);
5981 	if (res)
5982 		return res;
5983 
5984 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5985 	    plane_cap && plane_cap->per_pixel_alpha) {
5986 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5987 					  BIT(DRM_MODE_BLEND_PREMULTI);
5988 
5989 		drm_plane_create_alpha_property(plane);
5990 		drm_plane_create_blend_mode_property(plane, blend_caps);
5991 	}
5992 
5993 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5994 	    plane_cap &&
5995 	    (plane_cap->pixel_format_support.nv12 ||
5996 	     plane_cap->pixel_format_support.p010)) {
5997 		/* This only affects YUV formats. */
5998 		drm_plane_create_color_properties(
5999 			plane,
6000 			BIT(DRM_COLOR_YCBCR_BT601) |
6001 			BIT(DRM_COLOR_YCBCR_BT709) |
6002 			BIT(DRM_COLOR_YCBCR_BT2020),
6003 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6004 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6005 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6006 	}
6007 
6008 	supported_rotations =
6009 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6010 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6011 
6012 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6013 					   supported_rotations);
6014 
6015 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6016 
6017 	/* Create (reset) the plane state */
6018 	if (plane->funcs->reset)
6019 		plane->funcs->reset(plane);
6020 
6021 	return 0;
6022 }
6023 
6024 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6025 			       struct drm_plane *plane,
6026 			       uint32_t crtc_index)
6027 {
6028 	struct amdgpu_crtc *acrtc = NULL;
6029 	struct drm_plane *cursor_plane;
6030 
6031 	int res = -ENOMEM;
6032 
6033 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6034 	if (!cursor_plane)
6035 		goto fail;
6036 
6037 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6038 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6039 
6040 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6041 	if (!acrtc)
6042 		goto fail;
6043 
6044 	res = drm_crtc_init_with_planes(
6045 			dm->ddev,
6046 			&acrtc->base,
6047 			plane,
6048 			cursor_plane,
6049 			&amdgpu_dm_crtc_funcs, NULL);
6050 
6051 	if (res)
6052 		goto fail;
6053 
6054 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6055 
6056 	/* Create (reset) the crtc state */
6057 	if (acrtc->base.funcs->reset)
6058 		acrtc->base.funcs->reset(&acrtc->base);
6059 
6060 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6061 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6062 
6063 	acrtc->crtc_id = crtc_index;
6064 	acrtc->base.enabled = false;
6065 	acrtc->otg_inst = -1;
6066 
6067 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6068 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6069 				   true, MAX_COLOR_LUT_ENTRIES);
6070 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6071 
6072 	return 0;
6073 
6074 fail:
6075 	kfree(acrtc);
6076 	kfree(cursor_plane);
6077 	return res;
6078 }
6079 
6080 
6081 static int to_drm_connector_type(enum signal_type st)
6082 {
6083 	switch (st) {
6084 	case SIGNAL_TYPE_HDMI_TYPE_A:
6085 		return DRM_MODE_CONNECTOR_HDMIA;
6086 	case SIGNAL_TYPE_EDP:
6087 		return DRM_MODE_CONNECTOR_eDP;
6088 	case SIGNAL_TYPE_LVDS:
6089 		return DRM_MODE_CONNECTOR_LVDS;
6090 	case SIGNAL_TYPE_RGB:
6091 		return DRM_MODE_CONNECTOR_VGA;
6092 	case SIGNAL_TYPE_DISPLAY_PORT:
6093 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6094 		return DRM_MODE_CONNECTOR_DisplayPort;
6095 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6096 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6097 		return DRM_MODE_CONNECTOR_DVID;
6098 	case SIGNAL_TYPE_VIRTUAL:
6099 		return DRM_MODE_CONNECTOR_VIRTUAL;
6100 
6101 	default:
6102 		return DRM_MODE_CONNECTOR_Unknown;
6103 	}
6104 }
6105 
6106 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6107 {
6108 	struct drm_encoder *encoder;
6109 
6110 	/* There is only one encoder per connector */
6111 	drm_connector_for_each_possible_encoder(connector, encoder)
6112 		return encoder;
6113 
6114 	return NULL;
6115 }
6116 
6117 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6118 {
6119 	struct drm_encoder *encoder;
6120 	struct amdgpu_encoder *amdgpu_encoder;
6121 
6122 	encoder = amdgpu_dm_connector_to_encoder(connector);
6123 
6124 	if (encoder == NULL)
6125 		return;
6126 
6127 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6128 
6129 	amdgpu_encoder->native_mode.clock = 0;
6130 
6131 	if (!list_empty(&connector->probed_modes)) {
6132 		struct drm_display_mode *preferred_mode = NULL;
6133 
6134 		list_for_each_entry(preferred_mode,
6135 				    &connector->probed_modes,
6136 				    head) {
6137 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6138 				amdgpu_encoder->native_mode = *preferred_mode;
6139 
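			/*
			 * Only the first entry needs checking: the probed
			 * list was sorted in amdgpu_dm_connector_ddc_get_modes(),
			 * which places preferred modes at the head.
			 */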
6140 			break;
6141 		}
6142 
6143 	}
6144 }
6145 
6146 static struct drm_display_mode *
6147 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6148 			     char *name,
6149 			     int hdisplay, int vdisplay)
6150 {
6151 	struct drm_device *dev = encoder->dev;
6152 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6153 	struct drm_display_mode *mode = NULL;
6154 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6155 
6156 	mode = drm_mode_duplicate(dev, native_mode);
6157 
6158 	if (mode == NULL)
6159 		return NULL;
6160 
6161 	mode->hdisplay = hdisplay;
6162 	mode->vdisplay = vdisplay;
6163 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6164 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6165 
6166 	return mode;
6167 
6168 }
6169 
6170 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6171 						 struct drm_connector *connector)
6172 {
6173 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6174 	struct drm_display_mode *mode = NULL;
6175 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6176 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6177 				to_amdgpu_dm_connector(connector);
6178 	int i;
6179 	int n;
6180 	struct mode_size {
6181 		char name[DRM_DISPLAY_MODE_LEN];
6182 		int w;
6183 		int h;
6184 	} common_modes[] = {
6185 		{  "640x480",  640,  480},
6186 		{  "800x600",  800,  600},
6187 		{ "1024x768", 1024,  768},
6188 		{ "1280x720", 1280,  720},
6189 		{ "1280x800", 1280,  800},
6190 		{"1280x1024", 1280, 1024},
6191 		{ "1440x900", 1440,  900},
6192 		{"1680x1050", 1680, 1050},
6193 		{"1600x1200", 1600, 1200},
6194 		{"1920x1080", 1920, 1080},
6195 		{"1920x1200", 1920, 1200}
6196 	};
6197 
6198 	n = ARRAY_SIZE(common_modes);
6199 
6200 	for (i = 0; i < n; i++) {
6201 		struct drm_display_mode *curmode = NULL;
6202 		bool mode_existed = false;
6203 
6204 		if (common_modes[i].w > native_mode->hdisplay ||
6205 		    common_modes[i].h > native_mode->vdisplay ||
6206 		   (common_modes[i].w == native_mode->hdisplay &&
6207 		    common_modes[i].h == native_mode->vdisplay))
6208 			continue;
6209 
6210 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6211 			if (common_modes[i].w == curmode->hdisplay &&
6212 			    common_modes[i].h == curmode->vdisplay) {
6213 				mode_existed = true;
6214 				break;
6215 			}
6216 		}
6217 
6218 		if (mode_existed)
6219 			continue;
6220 
6221 		mode = amdgpu_dm_create_common_mode(encoder,
6222 				common_modes[i].name, common_modes[i].w,
6223 				common_modes[i].h);
6224 		drm_mode_probed_add(connector, mode);
6225 		amdgpu_dm_connector->num_modes++;
6226 	}
6227 }
6228 
6229 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6230 					      struct edid *edid)
6231 {
6232 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6233 			to_amdgpu_dm_connector(connector);
6234 
6235 	if (edid) {
6236 		/* empty probed_modes */
6237 		INIT_LIST_HEAD(&connector->probed_modes);
6238 		amdgpu_dm_connector->num_modes =
6239 				drm_add_edid_modes(connector, edid);
6240 
6241 		/* Sort the probed modes before calling
6242 		 * amdgpu_dm_get_native_mode(), since an EDID can have
6243 		 * more than one preferred mode. Modes later in the
6244 		 * probed mode list could be of higher, preferred
6245 		 * resolution: for example, a 3840x2160 preferred timing
6246 		 * in the base EDID and a 4096x2160 preferred resolution
6247 		 * in a DID extension block later.
6248 		 */
6249 		drm_mode_sort(&connector->probed_modes);
6250 		amdgpu_dm_get_native_mode(connector);
6251 	} else {
6252 		amdgpu_dm_connector->num_modes = 0;
6253 	}
6254 }
6255 
6256 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6257 {
6258 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6259 			to_amdgpu_dm_connector(connector);
6260 	struct drm_encoder *encoder;
6261 	struct edid *edid = amdgpu_dm_connector->edid;
6262 
6263 	encoder = amdgpu_dm_connector_to_encoder(connector);
6264 
6265 	if (!edid || !drm_edid_is_valid(edid)) {
6266 		amdgpu_dm_connector->num_modes =
6267 				drm_add_modes_noedid(connector, 640, 480);
6268 	} else {
6269 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6270 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6271 	}
6272 	amdgpu_dm_fbc_init(connector);
6273 
6274 	return amdgpu_dm_connector->num_modes;
6275 }
6276 
6277 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6278 				     struct amdgpu_dm_connector *aconnector,
6279 				     int connector_type,
6280 				     struct dc_link *link,
6281 				     int link_index)
6282 {
6283 	struct amdgpu_device *adev = dm->ddev->dev_private;
6284 
6285 	/*
6286 	 * Some of the properties below require access to state, like bpc.
6287 	 * Allocate some default initial connector state with our reset helper.
6288 	 */
6289 	if (aconnector->base.funcs->reset)
6290 		aconnector->base.funcs->reset(&aconnector->base);
6291 
6292 	aconnector->connector_id = link_index;
6293 	aconnector->dc_link = link;
6294 	aconnector->base.interlace_allowed = false;
6295 	aconnector->base.doublescan_allowed = false;
6296 	aconnector->base.stereo_allowed = false;
6297 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6298 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6299 	aconnector->audio_inst = -1;
6300 	mutex_init(&aconnector->hpd_lock);
6301 
6302 	/*
6303 	 * Configure HPD hot-plug support. connector->polled defaults to 0,
6304 	 * which means HPD hot plug is not supported.
6305 	 */
6306 	switch (connector_type) {
6307 	case DRM_MODE_CONNECTOR_HDMIA:
6308 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6309 		aconnector->base.ycbcr_420_allowed =
6310 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6311 		break;
6312 	case DRM_MODE_CONNECTOR_DisplayPort:
6313 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6314 		aconnector->base.ycbcr_420_allowed =
6315 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6316 		break;
6317 	case DRM_MODE_CONNECTOR_DVID:
6318 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6319 		break;
6320 	default:
6321 		break;
6322 	}
6323 
6324 	drm_object_attach_property(&aconnector->base.base,
6325 				dm->ddev->mode_config.scaling_mode_property,
6326 				DRM_MODE_SCALE_NONE);
6327 
6328 	drm_object_attach_property(&aconnector->base.base,
6329 				adev->mode_info.underscan_property,
6330 				UNDERSCAN_OFF);
6331 	drm_object_attach_property(&aconnector->base.base,
6332 				adev->mode_info.underscan_hborder_property,
6333 				0);
6334 	drm_object_attach_property(&aconnector->base.base,
6335 				adev->mode_info.underscan_vborder_property,
6336 				0);
6337 
6338 	if (!aconnector->mst_port)
6339 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6340 
6341 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6342 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6343 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6344 
6345 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6346 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6347 		drm_object_attach_property(&aconnector->base.base,
6348 				adev->mode_info.abm_level_property, 0);
6349 	}
6350 
6351 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6352 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6353 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6354 		drm_object_attach_property(
6355 			&aconnector->base.base,
6356 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6357 
6358 		if (!aconnector->mst_port)
6359 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6360 
6361 #ifdef CONFIG_DRM_AMD_DC_HDCP
6362 		if (adev->dm.hdcp_workqueue)
6363 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6364 #endif
6365 	}
6366 }
6367 
6368 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6369 			      struct i2c_msg *msgs, int num)
6370 {
6371 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6372 	struct ddc_service *ddc_service = i2c->ddc_service;
6373 	struct i2c_command cmd;
6374 	int i;
6375 	int result = -EIO;
6376 
6377 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6378 
6379 	if (!cmd.payloads)
6380 		return result;
6381 
6382 	cmd.number_of_payloads = num;
6383 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6384 	cmd.speed = 100;
6385 
6386 	for (i = 0; i < num; i++) {
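	/*
	 * Translate each struct i2c_msg into a DC i2c_payload; the whole
	 * transaction is then submitted to DC as a single command.
	 */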
6387 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6388 		cmd.payloads[i].address = msgs[i].addr;
6389 		cmd.payloads[i].length = msgs[i].len;
6390 		cmd.payloads[i].data = msgs[i].buf;
6391 	}
6392 
6393 	if (dc_submit_i2c(
6394 			ddc_service->ctx->dc,
6395 			ddc_service->ddc_pin->hw_info.ddc_channel,
6396 			&cmd))
6397 		result = num;
6398 
6399 	kfree(cmd.payloads);
6400 	return result;
6401 }
6402 
6403 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6404 {
6405 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6406 }
6407 
6408 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6409 	.master_xfer = amdgpu_dm_i2c_xfer,
6410 	.functionality = amdgpu_dm_i2c_func,
6411 };
6412 
6413 static struct amdgpu_i2c_adapter *
6414 create_i2c(struct ddc_service *ddc_service,
6415 	   int link_index,
6416 	   int *res)
6417 {
6418 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6419 	struct amdgpu_i2c_adapter *i2c;
6420 
6421 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6422 	if (!i2c)
6423 		return NULL;
6424 	i2c->base.owner = THIS_MODULE;
6425 	i2c->base.class = I2C_CLASS_DDC;
6426 	i2c->base.dev.parent = &adev->pdev->dev;
6427 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6428 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6429 	i2c_set_adapdata(&i2c->base, i2c);
6430 	i2c->ddc_service = ddc_service;
6431 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6432 
6433 	return i2c;
6434 }
6435 
6436 
6437 /*
6438  * Note: this function assumes that dc_link_detect() was called for the
6439  * dc_link which will be represented by this aconnector.
6440  */
6441 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6442 				    struct amdgpu_dm_connector *aconnector,
6443 				    uint32_t link_index,
6444 				    struct amdgpu_encoder *aencoder)
6445 {
6446 	int res = 0;
6447 	int connector_type;
6448 	struct dc *dc = dm->dc;
6449 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6450 	struct amdgpu_i2c_adapter *i2c;
6451 
6452 	link->priv = aconnector;
6453 
6454 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6455 
6456 	i2c = create_i2c(link->ddc, link->link_index, &res);
6457 	if (!i2c) {
6458 		DRM_ERROR("Failed to create i2c adapter data\n");
6459 		return -ENOMEM;
6460 	}
6461 
6462 	aconnector->i2c = i2c;
6463 	res = i2c_add_adapter(&i2c->base);
6464 
6465 	if (res) {
6466 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6467 		goto out_free;
6468 	}
6469 
6470 	connector_type = to_drm_connector_type(link->connector_signal);
6471 
6472 	res = drm_connector_init_with_ddc(
6473 			dm->ddev,
6474 			&aconnector->base,
6475 			&amdgpu_dm_connector_funcs,
6476 			connector_type,
6477 			&i2c->base);
6478 
6479 	if (res) {
6480 		DRM_ERROR("connector_init failed\n");
6481 		aconnector->connector_id = -1;
6482 		goto out_free;
6483 	}
6484 
6485 	drm_connector_helper_add(
6486 			&aconnector->base,
6487 			&amdgpu_dm_connector_helper_funcs);
6488 
6489 	amdgpu_dm_connector_init_helper(
6490 		dm,
6491 		aconnector,
6492 		connector_type,
6493 		link,
6494 		link_index);
6495 
6496 	drm_connector_attach_encoder(
6497 		&aconnector->base, &aencoder->base);
6498 
6499 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6500 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6501 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6502 
6503 out_free:
6504 	if (res) {
6505 		kfree(i2c);
6506 		aconnector->i2c = NULL;
6507 	}
6508 	return res;
6509 }
6510 
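/* Bitmask of CRTCs an encoder may drive: one bit per CRTC, capped at six. */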
6511 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6512 {
6513 	switch (adev->mode_info.num_crtc) {
6514 	case 1:
6515 		return 0x1;
6516 	case 2:
6517 		return 0x3;
6518 	case 3:
6519 		return 0x7;
6520 	case 4:
6521 		return 0xf;
6522 	case 5:
6523 		return 0x1f;
6524 	case 6:
6525 	default:
6526 		return 0x3f;
6527 	}
6528 }
6529 
6530 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6531 				  struct amdgpu_encoder *aencoder,
6532 				  uint32_t link_index)
6533 {
6534 	struct amdgpu_device *adev = dev->dev_private;
6535 
6536 	int res = drm_encoder_init(dev,
6537 				   &aencoder->base,
6538 				   &amdgpu_dm_encoder_funcs,
6539 				   DRM_MODE_ENCODER_TMDS,
6540 				   NULL);
6541 
6542 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6543 
6544 	if (!res)
6545 		aencoder->encoder_id = link_index;
6546 	else
6547 		aencoder->encoder_id = -1;
6548 
6549 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6550 
6551 	return res;
6552 }
6553 
6554 static void manage_dm_interrupts(struct amdgpu_device *adev,
6555 				 struct amdgpu_crtc *acrtc,
6556 				 bool enable)
6557 {
6558 	/*
6559 	 * We have no guarantee that the frontend index maps to the same
6560 	 * backend index - some even map to more than one.
6561 	 *
6562 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6563 	 */
6564 	int irq_type =
6565 		amdgpu_display_crtc_idx_to_irq_type(
6566 			adev,
6567 			acrtc->crtc_id);
6568 
6569 	if (enable) {
6570 		drm_crtc_vblank_on(&acrtc->base);
6571 		amdgpu_irq_get(
6572 			adev,
6573 			&adev->pageflip_irq,
6574 			irq_type);
6575 	} else {
6576 
6577 		amdgpu_irq_put(
6578 			adev,
6579 			&adev->pageflip_irq,
6580 			irq_type);
6581 		drm_crtc_vblank_off(&acrtc->base);
6582 	}
6583 }
6584 
6585 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6586 				      struct amdgpu_crtc *acrtc)
6587 {
6588 	int irq_type =
6589 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6590 
6591 	/*
6592 	 * This reads the current state for the IRQ and forcibly reapplies
6593 	 * the setting to hardware.
6594 	 */
6595 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6596 }
6597 
6598 static bool
6599 is_scaling_state_different(const struct dm_connector_state *dm_state,
6600 			   const struct dm_connector_state *old_dm_state)
6601 {
6602 	if (dm_state->scaling != old_dm_state->scaling)
6603 		return true;
6604 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6605 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6606 			return true;
6607 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6608 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6609 			return true;
6610 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6611 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6612 		return true;
6613 	return false;
6614 }
6615 
6616 #ifdef CONFIG_DRM_AMD_DC_HDCP
6617 static bool is_content_protection_different(struct drm_connector_state *state,
6618 					    const struct drm_connector_state *old_state,
6619 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6620 {
6621 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6622 
6623 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6624 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6625 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6626 		return true;
6627 	}
6628 
6629 	/* CP is being re-enabled, ignore this */
6630 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6631 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6632 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6633 		return false;
6634 	}
6635 
6636 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6637 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6638 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6639 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6640 
6641 	/* Check if something is connected or enabled; otherwise we would start
6642 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
6643 	 */
6644 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6645 	    aconnector->dc_sink != NULL)
6646 		return true;
6647 
6648 	if (old_state->content_protection == state->content_protection)
6649 		return false;
6650 
6651 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6652 		return true;
6653 
6654 	return false;
6655 }
6656 
6657 #endif
6658 static void remove_stream(struct amdgpu_device *adev,
6659 			  struct amdgpu_crtc *acrtc,
6660 			  struct dc_stream_state *stream)
6661 {
6662 	/* this is the update mode case */
6663 
6664 	acrtc->otg_inst = -1;
6665 	acrtc->enabled = false;
6666 }
6667 
6668 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6669 			       struct dc_cursor_position *position)
6670 {
6671 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6672 	int x, y;
6673 	int xorigin = 0, yorigin = 0;
6674 
6675 	position->enable = false;
6676 	position->x = 0;
6677 	position->y = 0;
6678 
6679 	if (!crtc || !plane->state->fb)
6680 		return 0;
6681 
6682 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6683 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6684 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6685 			  __func__,
6686 			  plane->state->crtc_w,
6687 			  plane->state->crtc_h);
6688 		return -EINVAL;
6689 	}
6690 
6691 	x = plane->state->crtc_x;
6692 	y = plane->state->crtc_y;
6693 
6694 	if (x <= -amdgpu_crtc->max_cursor_width ||
6695 	    y <= -amdgpu_crtc->max_cursor_height)
6696 		return 0;
6697 
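	/*
	 * For a partially off-screen cursor, clamp the position to zero
	 * and shift the hotspot by the clipped amount so the visible
	 * part stays where userspace placed it.
	 */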
6698 	if (x < 0) {
6699 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6700 		x = 0;
6701 	}
6702 	if (y < 0) {
6703 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6704 		y = 0;
6705 	}
6706 	position->enable = true;
6707 	position->translate_by_source = true;
6708 	position->x = x;
6709 	position->y = y;
6710 	position->x_hotspot = xorigin;
6711 	position->y_hotspot = yorigin;
6712 
6713 	return 0;
6714 }
6715 
6716 static void handle_cursor_update(struct drm_plane *plane,
6717 				 struct drm_plane_state *old_plane_state)
6718 {
6719 	struct amdgpu_device *adev = plane->dev->dev_private;
6720 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6721 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6722 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6723 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6724 	uint64_t address = afb ? afb->address : 0;
6725 	struct dc_cursor_position position;
6726 	struct dc_cursor_attributes attributes;
6727 	int ret;
6728 
6729 	if (!plane->state->fb && !old_plane_state->fb)
6730 		return;
6731 
6732 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
6733 			 __func__,
6734 			 amdgpu_crtc->crtc_id,
6735 			 plane->state->crtc_w,
6736 			 plane->state->crtc_h);
6737 
6738 	ret = get_cursor_position(plane, crtc, &position);
6739 	if (ret)
6740 		return;
6741 
6742 	if (!position.enable) {
6743 		/* turn off cursor */
6744 		if (crtc_state && crtc_state->stream) {
6745 			mutex_lock(&adev->dm.dc_lock);
6746 			dc_stream_set_cursor_position(crtc_state->stream,
6747 						      &position);
6748 			mutex_unlock(&adev->dm.dc_lock);
6749 		}
6750 		return;
6751 	}
6752 
6753 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6754 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6755 
6756 	memset(&attributes, 0, sizeof(attributes));
6757 	attributes.address.high_part = upper_32_bits(address);
6758 	attributes.address.low_part  = lower_32_bits(address);
6759 	attributes.width             = plane->state->crtc_w;
6760 	attributes.height            = plane->state->crtc_h;
6761 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6762 	attributes.rotation_angle    = 0;
6763 	attributes.attribute_flags.value = 0;
6764 
6765 	attributes.pitch = attributes.width;
6766 
6767 	if (crtc_state->stream) {
6768 		mutex_lock(&adev->dm.dc_lock);
6769 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6770 							 &attributes))
6771 			DRM_ERROR("DC failed to set cursor attributes\n");
6772 
6773 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6774 						   &position))
6775 			DRM_ERROR("DC failed to set cursor position\n");
6776 		mutex_unlock(&adev->dm.dc_lock);
6777 	}
6778 }
6779 
6780 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6781 {
6782 
6783 	assert_spin_locked(&acrtc->base.dev->event_lock);
6784 	WARN_ON(acrtc->event);
6785 
6786 	acrtc->event = acrtc->base.state->event;
6787 
6788 	/* Set the flip status */
6789 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6790 
6791 	/* Mark this event as consumed */
6792 	acrtc->base.state->event = NULL;
6793 
6794 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6795 						 acrtc->crtc_id);
6796 }
6797 
6798 static void update_freesync_state_on_stream(
6799 	struct amdgpu_display_manager *dm,
6800 	struct dm_crtc_state *new_crtc_state,
6801 	struct dc_stream_state *new_stream,
6802 	struct dc_plane_state *surface,
6803 	u32 flip_timestamp_in_us)
6804 {
6805 	struct mod_vrr_params vrr_params;
6806 	struct dc_info_packet vrr_infopacket = {0};
6807 	struct amdgpu_device *adev = dm->adev;
6808 	unsigned long flags;
6809 
6810 	if (!new_stream)
6811 		return;
6812 
6813 	/*
6814 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6815 	 * For now it's sufficient to just guard against these conditions.
6816 	 */
6817 
6818 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6819 		return;
6820 
6821 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6822 	vrr_params = new_crtc_state->vrr_params;
6823 
6824 	if (surface) {
6825 		mod_freesync_handle_preflip(
6826 			dm->freesync_module,
6827 			surface,
6828 			new_stream,
6829 			flip_timestamp_in_us,
6830 			&vrr_params);
6831 
6832 		if (adev->family < AMDGPU_FAMILY_AI &&
6833 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6834 			mod_freesync_handle_v_update(dm->freesync_module,
6835 						     new_stream, &vrr_params);
6836 
6837 			/* Need to call this before the frame ends. */
6838 			dc_stream_adjust_vmin_vmax(dm->dc,
6839 						   new_crtc_state->stream,
6840 						   &vrr_params.adjust);
6841 		}
6842 	}
6843 
6844 	mod_freesync_build_vrr_infopacket(
6845 		dm->freesync_module,
6846 		new_stream,
6847 		&vrr_params,
6848 		PACKET_TYPE_VRR,
6849 		TRANSFER_FUNC_UNKNOWN,
6850 		&vrr_infopacket);
6851 
6852 	new_crtc_state->freesync_timing_changed |=
6853 		(memcmp(&new_crtc_state->vrr_params.adjust,
6854 			&vrr_params.adjust,
6855 			sizeof(vrr_params.adjust)) != 0);
6856 
6857 	new_crtc_state->freesync_vrr_info_changed |=
6858 		(memcmp(&new_crtc_state->vrr_infopacket,
6859 			&vrr_infopacket,
6860 			sizeof(vrr_infopacket)) != 0);
6861 
6862 	new_crtc_state->vrr_params = vrr_params;
6863 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6864 
6865 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6866 	new_stream->vrr_infopacket = vrr_infopacket;
6867 
6868 	if (new_crtc_state->freesync_vrr_info_changed)
6869 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
6870 			      new_crtc_state->base.crtc->base.id,
6871 			      (int)new_crtc_state->base.vrr_enabled,
6872 			      (int)vrr_params.state);
6873 
6874 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6875 }
6876 
6877 static void pre_update_freesync_state_on_stream(
6878 	struct amdgpu_display_manager *dm,
6879 	struct dm_crtc_state *new_crtc_state)
6880 {
6881 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6882 	struct mod_vrr_params vrr_params;
6883 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6884 	struct amdgpu_device *adev = dm->adev;
6885 	unsigned long flags;
6886 
6887 	if (!new_stream)
6888 		return;
6889 
6890 	/*
6891 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6892 	 * For now it's sufficient to just guard against these conditions.
6893 	 */
6894 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6895 		return;
6896 
6897 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6898 	vrr_params = new_crtc_state->vrr_params;
6899 
6900 	if (new_crtc_state->vrr_supported &&
6901 	    config.min_refresh_in_uhz &&
6902 	    config.max_refresh_in_uhz) {
6903 		config.state = new_crtc_state->base.vrr_enabled ?
6904 			VRR_STATE_ACTIVE_VARIABLE :
6905 			VRR_STATE_INACTIVE;
6906 	} else {
6907 		config.state = VRR_STATE_UNSUPPORTED;
6908 	}
6909 
6910 	mod_freesync_build_vrr_params(dm->freesync_module,
6911 				      new_stream,
6912 				      &config, &vrr_params);
6913 
6914 	new_crtc_state->freesync_timing_changed |=
6915 		(memcmp(&new_crtc_state->vrr_params.adjust,
6916 			&vrr_params.adjust,
6917 			sizeof(vrr_params.adjust)) != 0);
6918 
6919 	new_crtc_state->vrr_params = vrr_params;
6920 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6921 }
6922 
6923 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6924 					    struct dm_crtc_state *new_state)
6925 {
6926 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6927 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6928 
6929 	if (!old_vrr_active && new_vrr_active) {
6930 		/* Transition VRR inactive -> active:
6931 		 * While VRR is active, we must not disable vblank irq, as a
6932 		 * reenable after disable would compute bogus vblank/pflip
6933 		 * timestamps if it likely happened inside display front-porch.
6934 		 *
6935 		 * We also need vupdate irq for the actual core vblank handling
6936 		 * at end of vblank.
6937 		 */
6938 		dm_set_vupdate_irq(new_state->base.crtc, true);
6939 		drm_crtc_vblank_get(new_state->base.crtc);
6940 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6941 				 __func__, new_state->base.crtc->base.id);
6942 	} else if (old_vrr_active && !new_vrr_active) {
6943 		/* Transition VRR active -> inactive:
6944 		 * Allow vblank irq disable again for fixed refresh rate.
6945 		 */
6946 		dm_set_vupdate_irq(new_state->base.crtc, false);
6947 		drm_crtc_vblank_put(new_state->base.crtc);
6948 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6949 				 __func__, new_state->base.crtc->base.id);
6950 	}
6951 }
6952 
6953 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6954 {
6955 	struct drm_plane *plane;
6956 	struct drm_plane_state *old_plane_state, *new_plane_state;
6957 	int i;
6958 
6959 	/*
6960 	 * TODO: Make this per-stream so we don't issue redundant updates for
6961 	 * commits with multiple streams.
6962 	 */
6963 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6964 				       new_plane_state, i)
6965 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6966 			handle_cursor_update(plane, old_plane_state);
6967 }
6968 
6969 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6970 				    struct dc_state *dc_state,
6971 				    struct drm_device *dev,
6972 				    struct amdgpu_display_manager *dm,
6973 				    struct drm_crtc *pcrtc,
6974 				    bool wait_for_vblank)
6975 {
6976 	uint32_t i;
6977 	uint64_t timestamp_ns;
6978 	struct drm_plane *plane;
6979 	struct drm_plane_state *old_plane_state, *new_plane_state;
6980 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6981 	struct drm_crtc_state *new_pcrtc_state =
6982 			drm_atomic_get_new_crtc_state(state, pcrtc);
6983 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6984 	struct dm_crtc_state *dm_old_crtc_state =
6985 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6986 	int planes_count = 0, vpos, hpos;
6987 	long r;
6988 	unsigned long flags;
6989 	struct amdgpu_bo *abo;
6990 	uint64_t tiling_flags;
6991 	bool tmz_surface = false;
6992 	uint32_t target_vblank, last_flip_vblank;
6993 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6994 	bool pflip_present = false;
6995 	struct {
6996 		struct dc_surface_update surface_updates[MAX_SURFACES];
6997 		struct dc_plane_info plane_infos[MAX_SURFACES];
6998 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6999 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7000 		struct dc_stream_update stream_update;
7001 	} *bundle;
7002 
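	/*
	 * The bundle embeds MAX_SURFACES copies of several update structures,
	 * far too large for the kernel stack, so allocate it from the heap.
	 */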
7003 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7004 
7005 	if (!bundle) {
7006 		dm_error("Failed to allocate update bundle\n");
7007 		goto cleanup;
7008 	}
7009 
7010 	/*
7011 	 * Disable the cursor first if we're disabling all the planes.
7012 	 * It'll remain on the screen after the planes are re-enabled
7013 	 * if we don't.
7014 	 */
7015 	if (acrtc_state->active_planes == 0)
7016 		amdgpu_dm_commit_cursors(state);
7017 
7018 	/* update planes when needed */
7019 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7020 		struct drm_crtc *crtc = new_plane_state->crtc;
7021 		struct drm_crtc_state *new_crtc_state;
7022 		struct drm_framebuffer *fb = new_plane_state->fb;
7023 		bool plane_needs_flip;
7024 		struct dc_plane_state *dc_plane;
7025 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7026 
7027 		/* Cursor plane is handled after stream updates */
7028 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7029 			continue;
7030 
7031 		if (!fb || !crtc || pcrtc != crtc)
7032 			continue;
7033 
7034 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7035 		if (!new_crtc_state->active)
7036 			continue;
7037 
7038 		dc_plane = dm_new_plane_state->dc_state;
7039 
7040 		bundle->surface_updates[planes_count].surface = dc_plane;
7041 		if (new_pcrtc_state->color_mgmt_changed) {
7042 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7043 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7044 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7045 		}
7046 
7047 		fill_dc_scaling_info(new_plane_state,
7048 				     &bundle->scaling_infos[planes_count]);
7049 
7050 		bundle->surface_updates[planes_count].scaling_info =
7051 			&bundle->scaling_infos[planes_count];
7052 
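		/*
		 * A page flip is only needed when both the old and new state
		 * have a framebuffer; otherwise the plane is being enabled or
		 * disabled rather than flipped.
		 */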
7053 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7054 
7055 		pflip_present = pflip_present || plane_needs_flip;
7056 
7057 		if (!plane_needs_flip) {
7058 			planes_count += 1;
7059 			continue;
7060 		}
7061 
7062 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7063 
7064 		/*
7065 		 * Wait for all fences on this FB. Do limited wait to avoid
7066 		 * deadlock during GPU reset when this fence will not signal
7067 		 * but we hold reservation lock for the BO.
7068 		 */
7069 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7070 							false,
7071 							msecs_to_jiffies(5000));
7072 		if (unlikely(r <= 0))
7073 			DRM_ERROR("Waiting for fences timed out!\n");
7074 
7075 		/*
7076 		 * TODO: This might fail and hence is better not used; wait
7077 		 * explicitly on the fences instead. In general this should
7078 		 * only be called for a blocking commit, as per the framework
7079 		 * helpers.
7080 		 */
7081 		r = amdgpu_bo_reserve(abo, true);
7082 		if (unlikely(r != 0))
7083 			DRM_ERROR("failed to reserve buffer before flip\n");
7084 
7085 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
7086 
7087 		tmz_surface = amdgpu_bo_encrypted(abo);
7088 
7089 		amdgpu_bo_unreserve(abo);
7090 
7091 		fill_dc_plane_info_and_addr(
7092 			dm->adev, new_plane_state, tiling_flags,
7093 			&bundle->plane_infos[planes_count],
7094 			&bundle->flip_addrs[planes_count].address,
7095 			tmz_surface,
7096 			false);
7097 
7098 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7099 				 new_plane_state->plane->index,
7100 				 bundle->plane_infos[planes_count].dcc.enable);
7101 
7102 		bundle->surface_updates[planes_count].plane_info =
7103 			&bundle->plane_infos[planes_count];
7104 
7105 		/*
7106 		 * Only allow immediate flips for fast updates that don't
7107 		 * change FB pitch, DCC state, rotation or mirroring.
7108 		 */
7109 		bundle->flip_addrs[planes_count].flip_immediate =
7110 			crtc->state->async_flip &&
7111 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7112 
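		/* DC expects the flip timestamp in microseconds, so convert from ns. */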
7113 		timestamp_ns = ktime_get_ns();
7114 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7115 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7116 		bundle->surface_updates[planes_count].surface = dc_plane;
7117 
7118 		if (!bundle->surface_updates[planes_count].surface) {
7119 			DRM_ERROR("No surface for CRTC: id=%d\n",
7120 					acrtc_attach->crtc_id);
7121 			continue;
7122 		}
7123 
7124 		if (plane == pcrtc->primary)
7125 			update_freesync_state_on_stream(
7126 				dm,
7127 				acrtc_state,
7128 				acrtc_state->stream,
7129 				dc_plane,
7130 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7131 
7132 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7133 				 __func__,
7134 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7135 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7136 
7137 		planes_count += 1;
7138 
7139 	}
7140 
7141 	if (pflip_present) {
7142 		if (!vrr_active) {
7143 			/* Use old throttling in non-vrr fixed refresh rate mode
7144 			 * to keep flip scheduling based on target vblank counts
7145 			 * working in a backwards compatible way, e.g., for
7146 			 * clients using the GLX_OML_sync_control extension or
7147 			 * DRI3/Present extension with defined target_msc.
7148 			 */
7149 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7150 		} else {
7152 			/* For variable refresh rate mode only:
7153 			 * Get vblank of last completed flip to avoid > 1 vrr
7154 			 * flips per video frame by use of throttling, but allow
7155 			 * flip programming anywhere in the possibly large
7156 			 * variable vrr vblank interval for fine-grained flip
7157 			 * timing control and more opportunity to avoid stutter
7158 			 * on late submission of flips.
7159 			 */
7160 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7161 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7162 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7163 		}
7164 
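		/*
		 * wait_for_vblank acts as an arithmetic 0/1 here: target either
		 * the vblank of the last flip or the one after it.
		 */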
7165 		target_vblank = last_flip_vblank + wait_for_vblank;
7166 
7167 		/*
7168 		 * Wait until we're out of the vertical blank period before the one
7169 		 * targeted by the flip
7170 		 */
7171 		while ((acrtc_attach->enabled &&
7172 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7173 							    0, &vpos, &hpos, NULL,
7174 							    NULL, &pcrtc->hwmode)
7175 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7176 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7177 			(int)(target_vblank -
7178 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7179 			usleep_range(1000, 1100);
7180 		}
7181 
7182 		/*
7183 		 * Prepare the flip event for the pageflip interrupt to handle.
7184 		 *
7185 		 * This only works in the case where we've already turned on the
7186 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7187 		 * from 0 -> n planes we have to skip a hardware generated event
7188 		 * and rely on sending it from software.
7189 		 */
7190 		if (acrtc_attach->base.state->event &&
7191 		    acrtc_state->active_planes > 0) {
7192 			drm_crtc_vblank_get(pcrtc);
7193 
7194 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7195 
7196 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7197 			prepare_flip_isr(acrtc_attach);
7198 
7199 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7200 		}
7201 
7202 		if (acrtc_state->stream) {
7203 			if (acrtc_state->freesync_vrr_info_changed)
7204 				bundle->stream_update.vrr_infopacket =
7205 					&acrtc_state->stream->vrr_infopacket;
7206 		}
7207 	}
7208 
7209 	/* Update the planes if changed or disable if we don't have any. */
7210 	if ((planes_count || acrtc_state->active_planes == 0) &&
7211 		acrtc_state->stream) {
7212 		bundle->stream_update.stream = acrtc_state->stream;
7213 		if (new_pcrtc_state->mode_changed) {
7214 			bundle->stream_update.src = acrtc_state->stream->src;
7215 			bundle->stream_update.dst = acrtc_state->stream->dst;
7216 		}
7217 
7218 		if (new_pcrtc_state->color_mgmt_changed) {
7219 			/*
7220 			 * TODO: This isn't fully correct since we've actually
7221 			 * already modified the stream in place.
7222 			 */
7223 			bundle->stream_update.gamut_remap =
7224 				&acrtc_state->stream->gamut_remap_matrix;
7225 			bundle->stream_update.output_csc_transform =
7226 				&acrtc_state->stream->csc_color_matrix;
7227 			bundle->stream_update.out_transfer_func =
7228 				acrtc_state->stream->out_transfer_func;
7229 		}
7230 
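		/*
		 * Keep the stream's ABM (Adaptive Backlight Management) level
		 * in sync, but only send it in the update when it changed.
		 */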
7231 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7232 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7233 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7234 
7235 		/*
7236 		 * If FreeSync state on the stream has changed then we need to
7237 		 * re-adjust the min/max bounds now that DC doesn't handle this
7238 		 * as part of commit.
7239 		 */
7240 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7241 		    amdgpu_dm_vrr_active(acrtc_state)) {
7242 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7243 			dc_stream_adjust_vmin_vmax(
7244 				dm->dc, acrtc_state->stream,
7245 				&acrtc_state->vrr_params.adjust);
7246 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7247 		}
7248 		mutex_lock(&dm->dc_lock);
7249 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7250 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7251 			amdgpu_dm_psr_disable(acrtc_state->stream);
7252 
7253 		dc_commit_updates_for_stream(dm->dc,
7254 						     bundle->surface_updates,
7255 						     planes_count,
7256 						     acrtc_state->stream,
7257 						     &bundle->stream_update,
7258 						     dc_state);
7259 
7260 		/*
7261 		 * Enable or disable the interrupts on the backend.
7262 		 *
7263 		 * Most pipes are put into power gating when unused.
7264 		 *
7265 		 * When a pipe is power gated we lose the interrupt
7266 		 * enablement state it had before being gated.
7267 		 *
7268 		 * So we need to update the IRQ control state in hardware
7269 		 * whenever the pipe turns on (since it could be previously
7270 		 * power gated) or off (since some pipes can't be power gated
7271 		 * on some ASICs).
7272 		 */
7273 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7274 			dm_update_pflip_irq_state(
7275 				(struct amdgpu_device *)dev->dev_private,
7276 				acrtc_attach);
7277 
7278 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7279 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7280 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7281 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7282 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7283 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7284 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7285 			amdgpu_dm_psr_enable(acrtc_state->stream);
7286 		}
7287 
7288 		mutex_unlock(&dm->dc_lock);
7289 	}
7290 
7291 	/*
7292 	 * Update cursor state *after* programming all the planes.
7293 	 * This avoids redundant programming in the case where we're going
7294 	 * to be disabling a single plane, since those pipes are being disabled.
7295 	 */
7296 	if (acrtc_state->active_planes)
7297 		amdgpu_dm_commit_cursors(state);
7298 
7299 cleanup:
7300 	kfree(bundle);
7301 }
7302 
7303 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7304 				   struct drm_atomic_state *state)
7305 {
7306 	struct amdgpu_device *adev = dev->dev_private;
7307 	struct amdgpu_dm_connector *aconnector;
7308 	struct drm_connector *connector;
7309 	struct drm_connector_state *old_con_state, *new_con_state;
7310 	struct drm_crtc_state *new_crtc_state;
7311 	struct dm_crtc_state *new_dm_crtc_state;
7312 	const struct dc_stream_status *status;
7313 	int i, inst;
7314 
7315 	/* Notify audio device removals. */
7316 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7317 		if (old_con_state->crtc != new_con_state->crtc) {
7318 			/* CRTC changes require notification. */
7319 			goto notify;
7320 		}
7321 
7322 		if (!new_con_state->crtc)
7323 			continue;
7324 
7325 		new_crtc_state = drm_atomic_get_new_crtc_state(
7326 			state, new_con_state->crtc);
7327 
7328 		if (!new_crtc_state)
7329 			continue;
7330 
7331 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7332 			continue;
7333 
7334 	notify:
7335 		aconnector = to_amdgpu_dm_connector(connector);
7336 
7337 		mutex_lock(&adev->dm.audio_lock);
7338 		inst = aconnector->audio_inst;
7339 		aconnector->audio_inst = -1;
7340 		mutex_unlock(&adev->dm.audio_lock);
7341 
7342 		amdgpu_dm_audio_eld_notify(adev, inst);
7343 	}
7344 
7345 	/* Notify audio device additions. */
7346 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7347 		if (!new_con_state->crtc)
7348 			continue;
7349 
7350 		new_crtc_state = drm_atomic_get_new_crtc_state(
7351 			state, new_con_state->crtc);
7352 
7353 		if (!new_crtc_state)
7354 			continue;
7355 
7356 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7357 			continue;
7358 
7359 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7360 		if (!new_dm_crtc_state->stream)
7361 			continue;
7362 
7363 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7364 		if (!status)
7365 			continue;
7366 
7367 		aconnector = to_amdgpu_dm_connector(connector);
7368 
7369 		mutex_lock(&adev->dm.audio_lock);
7370 		inst = status->audio_inst;
7371 		aconnector->audio_inst = inst;
7372 		mutex_unlock(&adev->dm.audio_lock);
7373 
7374 		amdgpu_dm_audio_eld_notify(adev, inst);
7375 	}
7376 }
7377 
7378 /*
7379  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7380  * @crtc_state: the DRM CRTC state
7381  * @stream_state: the DC stream state.
7382  *
7383  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7384  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7385  */
7386 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7387 						struct dc_stream_state *stream_state)
7388 {
7389 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7390 }
7391 
7392 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7393 				   struct drm_atomic_state *state,
7394 				   bool nonblock)
7395 {
7396 	struct drm_crtc *crtc;
7397 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7398 	struct amdgpu_device *adev = dev->dev_private;
7399 	int i;
7400 
7401 	/*
7402 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7403 	 * a modeset, are being disabled, or have no active planes.
7404 	 *
7405 	 * It's done in atomic commit rather than commit tail for now since
7406 	 * some of these interrupt handlers access the current CRTC state and
7407 	 * potentially the stream pointer itself.
7408 	 *
7409 	 * Since the atomic state is swapped within atomic commit and not within
7410 	 * commit tail, this would lead to the new state (that hasn't been committed yet)
7411 	 * being accessed from within the handlers.
7412 	 *
7413 	 * TODO: Fix this so we can do this in commit tail and not have to block
7414 	 * in atomic check.
7415 	 */
7416 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7417 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7418 
7419 		if (old_crtc_state->active &&
7420 		    (!new_crtc_state->active ||
7421 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7422 			manage_dm_interrupts(adev, acrtc, false);
7423 	}
7424 	/*
7425 	 * Add check here for SoC's that support hardware cursor plane, to
7426 	 * unset legacy_cursor_update
7427 	 */
7428 
7429 	return drm_atomic_helper_commit(dev, state, nonblock);
7430 
7431 	/* TODO: Handle EINTR, re-enable IRQ */
7432 }
7433 
7434 /**
7435  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7436  * @state: The atomic state to commit
7437  *
7438  * This will tell DC to commit the constructed DC state from atomic_check,
7439  * programming the hardware. Any failure here implies a hardware failure, since
7440  * atomic check should have filtered anything non-kosher.
7441  */
7442 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7443 {
7444 	struct drm_device *dev = state->dev;
7445 	struct amdgpu_device *adev = dev->dev_private;
7446 	struct amdgpu_display_manager *dm = &adev->dm;
7447 	struct dm_atomic_state *dm_state;
7448 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7449 	uint32_t i, j;
7450 	struct drm_crtc *crtc;
7451 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7452 	unsigned long flags;
7453 	bool wait_for_vblank = true;
7454 	struct drm_connector *connector;
7455 	struct drm_connector_state *old_con_state, *new_con_state;
7456 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7457 	int crtc_disable_count = 0;
7458 
7459 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7460 
7461 	dm_state = dm_atomic_get_new_state(state);
7462 	if (dm_state && dm_state->context) {
7463 		dc_state = dm_state->context;
7464 	} else {
7465 		/* No state changes, retain current state. */
7466 		dc_state_temp = dc_create_state(dm->dc);
7467 		ASSERT(dc_state_temp);
7468 		dc_state = dc_state_temp;
7469 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7470 	}
7471 
7472 	/* update changed items */
7473 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7474 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7475 
7476 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7477 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7478 
7479 		DRM_DEBUG_DRIVER(
7480 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7481 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7482 			"connectors_changed:%d\n",
7483 			acrtc->crtc_id,
7484 			new_crtc_state->enable,
7485 			new_crtc_state->active,
7486 			new_crtc_state->planes_changed,
7487 			new_crtc_state->mode_changed,
7488 			new_crtc_state->active_changed,
7489 			new_crtc_state->connectors_changed);
7490 
7491 		/* Copy all transient state flags into dc state */
7492 		if (dm_new_crtc_state->stream) {
7493 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7494 							    dm_new_crtc_state->stream);
7495 		}
7496 
7497 		/* handles headless hotplug case, updating new_state and
7498 		 * aconnector as needed
7499 		 */
7500 
7501 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7502 
7503 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7504 
7505 			if (!dm_new_crtc_state->stream) {
7506 				/*
7507 				 * This could happen because of issues with
7508 				 * userspace notification delivery.
7509 				 * In this case userspace tries to set a mode on
7510 				 * a display which is in fact disconnected.
7511 				 * dc_sink is NULL on the aconnector in this case.
7512 				 * We expect a mode reset to come soon.
7513 				 *
7514 				 * This can also happen when an unplug is done
7515 				 * during the resume sequence.
7516 				 *
7517 				 * In this case, we want to pretend we still
7518 				 * have a sink to keep the pipe running so that
7519 				 * hw state is consistent with the sw state.
7520 				 */
7521 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7522 						__func__, acrtc->base.base.id);
7523 				continue;
7524 			}
7525 
7526 			if (dm_old_crtc_state->stream)
7527 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7528 
7529 			pm_runtime_get_noresume(dev->dev);
7530 
7531 			acrtc->enabled = true;
7532 			acrtc->hw_mode = new_crtc_state->mode;
7533 			crtc->hwmode = new_crtc_state->mode;
7534 		} else if (modereset_required(new_crtc_state)) {
7535 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7536 			/* i.e. reset mode */
7537 			if (dm_old_crtc_state->stream) {
7538 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7539 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7540 
7541 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7542 			}
7543 		}
7544 	} /* for_each_crtc_in_state() */
7545 
7546 	if (dc_state) {
7547 		dm_enable_per_frame_crtc_master_sync(dc_state);
7548 		mutex_lock(&dm->dc_lock);
7549 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7550 		mutex_unlock(&dm->dc_lock);
7551 	}
7552 
7553 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7554 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7555 
7556 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7557 
7558 		if (dm_new_crtc_state->stream != NULL) {
7559 			const struct dc_stream_status *status =
7560 					dc_stream_get_status(dm_new_crtc_state->stream);
7561 
7562 			if (!status)
7563 				status = dc_stream_get_status_from_state(dc_state,
7564 									 dm_new_crtc_state->stream);
7565 
7566 			if (!status)
7567 				DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
7568 			else
7569 				acrtc->otg_inst = status->primary_otg_inst;
7570 		}
7571 	}
7572 #ifdef CONFIG_DRM_AMD_DC_HDCP
7573 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7574 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7575 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7576 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7577 
7578 		new_crtc_state = NULL;
7579 
7580 		if (acrtc)
7581 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7582 
7583 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7584 
7585 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7586 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7587 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7588 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7589 			continue;
7590 		}
7591 
7592 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7593 			hdcp_update_display(
7594 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7595 				new_con_state->hdcp_content_type,
7596 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
7598 	}
7599 #endif
7600 
7601 	/* Handle connector state changes */
7602 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7603 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7604 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7605 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7606 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7607 		struct dc_stream_update stream_update;
7608 		struct dc_info_packet hdr_packet;
7609 		struct dc_stream_status *status = NULL;
7610 		bool abm_changed, hdr_changed, scaling_changed;
7611 
7612 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7613 		memset(&stream_update, 0, sizeof(stream_update));
7614 
7615 		if (acrtc) {
7616 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7617 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7618 		}
7619 
7620 		/* Skip any modesets/resets */
7621 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7622 			continue;
7623 
7624 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7625 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7626 
7627 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7628 							     dm_old_con_state);
7629 
7630 		abm_changed = dm_new_crtc_state->abm_level !=
7631 			      dm_old_crtc_state->abm_level;
7632 
7633 		hdr_changed =
7634 			is_hdr_metadata_different(old_con_state, new_con_state);
7635 
7636 		if (!scaling_changed && !abm_changed && !hdr_changed)
7637 			continue;
7638 
7639 		stream_update.stream = dm_new_crtc_state->stream;
7640 		if (scaling_changed) {
7641 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7642 					dm_new_con_state, dm_new_crtc_state->stream);
7643 
7644 			stream_update.src = dm_new_crtc_state->stream->src;
7645 			stream_update.dst = dm_new_crtc_state->stream->dst;
7646 		}
7647 
7648 		if (abm_changed) {
7649 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7650 
7651 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7652 		}
7653 
7654 		if (hdr_changed) {
7655 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7656 			stream_update.hdr_static_metadata = &hdr_packet;
7657 		}
7658 
7659 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7660 		WARN_ON(!status);
7661 		WARN_ON(!status->plane_count);
7662 
7663 		/*
7664 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7665 		 * Here we create an empty update on each plane.
7666 		 * To fix this, DC should permit updating only stream properties.
7667 		 */
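		/*
		 * Each dummy update points at the first plane state; the
		 * updates carry no changes and exist only so DC accepts the
		 * stream update.
		 */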
7668 		for (j = 0; j < status->plane_count; j++)
7669 			dummy_updates[j].surface = status->plane_states[0];
7670 
7671 
7672 		mutex_lock(&dm->dc_lock);
7673 		dc_commit_updates_for_stream(dm->dc,
7674 						     dummy_updates,
7675 						     status->plane_count,
7676 						     dm_new_crtc_state->stream,
7677 						     &stream_update,
7678 						     dc_state);
7679 		mutex_unlock(&dm->dc_lock);
7680 	}
7681 
7682 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7683 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7684 				      new_crtc_state, i) {
7685 		if (old_crtc_state->active && !new_crtc_state->active)
7686 			crtc_disable_count++;
7687 
7688 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7689 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7690 
7691 		/* Update freesync active state. */
7692 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7693 
7694 		/* Handle vrr on->off / off->on transitions */
7695 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7696 						dm_new_crtc_state);
7697 	}
7698 
7699 	/*
7700 	 * Enable interrupts for CRTCs that are newly enabled or went through
7701 	 * a modeset. It was intentionally deferred until after the front end
7702 	 * state was modified to wait until the OTG was on and so the IRQ
7703 	 * handlers didn't access stale or invalid state.
7704 	 */
7705 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7706 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7707 
7708 		if (new_crtc_state->active &&
7709 		    (!old_crtc_state->active ||
7710 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7711 			manage_dm_interrupts(adev, acrtc, true);
7712 #ifdef CONFIG_DEBUG_FS
7713 			/*
7714 			 * The frontend may have changed, so reapply the CRC capture
7715 			 * settings for the stream.
7716 			 */
7717 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7718 
7719 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7720 				amdgpu_dm_crtc_configure_crc_source(
7721 					crtc, dm_new_crtc_state,
7722 					dm_new_crtc_state->crc_src);
7723 			}
7724 #endif
7725 		}
7726 	}
7727 
7728 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7729 		if (new_crtc_state->async_flip)
7730 			wait_for_vblank = false;
7731 
7732 	/* update planes when needed per crtc*/
7733 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7734 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7735 
7736 		if (dm_new_crtc_state->stream)
7737 			amdgpu_dm_commit_planes(state, dc_state, dev,
7738 						dm, crtc, wait_for_vblank);
7739 	}
7740 
7741 	/* Update audio instances for each connector. */
7742 	amdgpu_dm_commit_audio(dev, state);
7743 
7744 	/*
7745 	 * Send vblank events for all events not handled in flip and
7746 	 * mark the consumed event for drm_atomic_helper_commit_hw_done().
7747 	 */
7748 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7749 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7750 
7751 		if (new_crtc_state->event)
7752 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7753 
7754 		new_crtc_state->event = NULL;
7755 	}
7756 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7757 
7758 	/* Signal HW programming completion */
7759 	drm_atomic_helper_commit_hw_done(state);
7760 
7761 	if (wait_for_vblank)
7762 		drm_atomic_helper_wait_for_flip_done(dev, state);
7763 
7764 	drm_atomic_helper_cleanup_planes(dev, state);
7765 
7766 	/*
7767 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7768 	 * so we can put the GPU into runtime suspend if we're not driving any
7769 	 * displays anymore
7770 	 */
7771 	for (i = 0; i < crtc_disable_count; i++)
7772 		pm_runtime_put_autosuspend(dev->dev);
7773 	pm_runtime_mark_last_busy(dev->dev);
7774 
7775 	if (dc_state_temp)
7776 		dc_release_state(dc_state_temp);
7777 }
7778 
7779 
7780 static int dm_force_atomic_commit(struct drm_connector *connector)
7781 {
7782 	int ret = 0;
7783 	struct drm_device *ddev = connector->dev;
7784 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7785 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7786 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7787 	struct drm_connector_state *conn_state;
7788 	struct drm_crtc_state *crtc_state;
7789 	struct drm_plane_state *plane_state;
7790 
7791 	if (!state)
7792 		return -ENOMEM;
7793 
7794 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7795 
7796 	/* Construct an atomic state to restore previous display settings */
7797 
7798 	/*
7799 	 * Attach connectors to drm_atomic_state
7800 	 */
7801 	conn_state = drm_atomic_get_connector_state(state, connector);
7802 
7803 	ret = PTR_ERR_OR_ZERO(conn_state);
7804 	if (ret)
7805 		goto err;
7806 
7807 	/* Attach crtc to drm_atomic_state*/
7808 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7809 
7810 	ret = PTR_ERR_OR_ZERO(crtc_state);
7811 	if (ret)
7812 		goto err;
7813 
7814 	/* force a restore */
7815 	crtc_state->mode_changed = true;
7816 
7817 	/* Attach plane to drm_atomic_state */
7818 	plane_state = drm_atomic_get_plane_state(state, plane);
7819 
7820 	ret = PTR_ERR_OR_ZERO(plane_state);
7821 	if (ret)
7822 		goto err;
7823 
7824 
7825 	/* Call commit internally with the state we just constructed */
7826 	ret = drm_atomic_commit(state);
7827 	if (!ret)
7828 		return 0;
7829 
7830 err:
7831 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7832 	drm_atomic_state_put(state);
7833 
7834 	return ret;
7835 }
7836 
7837 /*
7838  * This function handles all the cases when a set mode does not come upon hotplug.
7839  * This includes when a display is unplugged then plugged back into the
7840  * same port and when running without usermode desktop manager support.
7841  */
7842 void dm_restore_drm_connector_state(struct drm_device *dev,
7843 				    struct drm_connector *connector)
7844 {
7845 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7846 	struct amdgpu_crtc *disconnected_acrtc;
7847 	struct dm_crtc_state *acrtc_state;
7848 
7849 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7850 		return;
7851 
7852 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7853 	if (!disconnected_acrtc)
7854 		return;
7855 
7856 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7857 	if (!acrtc_state->stream)
7858 		return;
7859 
7860 	/*
7861 	 * If the previous sink is not released and different from the current,
7862 	 * we deduce we are in a state where we cannot rely on a usermode call
7863 	 * to turn on the display, so we do it here.
7864 	 */
7865 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7866 		dm_force_atomic_commit(&aconnector->base);
7867 }
7868 
7869 /*
7870  * Grabs all modesetting locks to serialize against any blocking commits,
7871  * and waits for completion of all non-blocking commits.
7872  */
7873 static int do_aquire_global_lock(struct drm_device *dev,
7874 				 struct drm_atomic_state *state)
7875 {
7876 	struct drm_crtc *crtc;
7877 	struct drm_crtc_commit *commit;
7878 	long ret;
7879 
7880 	/*
7881 	 * Adding all modeset locks to acquire_ctx will
7882 	 * ensure that when the framework releases it, the
7883 	 * extra locks we are taking here will also get released.
7884 	 */
7885 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7886 	if (ret)
7887 		return ret;
7888 
7889 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7890 		spin_lock(&crtc->commit_lock);
7891 		commit = list_first_entry_or_null(&crtc->commit_list,
7892 				struct drm_crtc_commit, commit_entry);
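		/*
		 * Take a reference so the commit can't be freed while we
		 * wait on its completions after dropping commit_lock.
		 */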
7893 		if (commit)
7894 			drm_crtc_commit_get(commit);
7895 		spin_unlock(&crtc->commit_lock);
7896 
7897 		if (!commit)
7898 			continue;
7899 
7900 		/*
7901 		 * Make sure all pending HW programming has completed and
7902 		 * all page flips are done.
7903 		 */
7904 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7905 
7906 		if (ret > 0)
7907 			ret = wait_for_completion_interruptible_timeout(
7908 					&commit->flip_done, 10*HZ);
7909 
7910 		if (ret == 0)
7911 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7912 				  "timed out\n", crtc->base.id, crtc->name);
7913 
7914 		drm_crtc_commit_put(commit);
7915 	}
7916 
7917 	return ret < 0 ? ret : 0;
7918 }
7919 
7920 static void get_freesync_config_for_crtc(
7921 	struct dm_crtc_state *new_crtc_state,
7922 	struct dm_connector_state *new_con_state)
7923 {
7924 	struct mod_freesync_config config = {0};
7925 	struct amdgpu_dm_connector *aconnector =
7926 			to_amdgpu_dm_connector(new_con_state->base.connector);
7927 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7928 	int vrefresh = drm_mode_vrefresh(mode);
7929 
7930 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
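	/*
	 * VRR is only usable when the connector reports FreeSync support and
	 * the mode's nominal refresh rate falls within the panel's
	 * min/max range.
	 */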
7931 					vrefresh >= aconnector->min_vfreq &&
7932 					vrefresh <= aconnector->max_vfreq;
7933 
7934 	if (new_crtc_state->vrr_supported) {
7935 		new_crtc_state->stream->ignore_msa_timing_param = true;
7936 		config.state = new_crtc_state->base.vrr_enabled ?
7937 				VRR_STATE_ACTIVE_VARIABLE :
7938 				VRR_STATE_INACTIVE;
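		/* The freesync module expects refresh rates in micro-Hz (uHz). */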
7939 		config.min_refresh_in_uhz =
7940 				aconnector->min_vfreq * 1000000;
7941 		config.max_refresh_in_uhz =
7942 				aconnector->max_vfreq * 1000000;
7943 		config.vsif_supported = true;
7944 		config.btr = true;
7945 	}
7946 
7947 	new_crtc_state->freesync_config = config;
7948 }
7949 
7950 static void reset_freesync_config_for_crtc(
7951 	struct dm_crtc_state *new_crtc_state)
7952 {
7953 	new_crtc_state->vrr_supported = false;
7954 
7955 	memset(&new_crtc_state->vrr_params, 0,
7956 	       sizeof(new_crtc_state->vrr_params));
7957 	memset(&new_crtc_state->vrr_infopacket, 0,
7958 	       sizeof(new_crtc_state->vrr_infopacket));
7959 }
7960 
7961 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7962 				struct drm_atomic_state *state,
7963 				struct drm_crtc *crtc,
7964 				struct drm_crtc_state *old_crtc_state,
7965 				struct drm_crtc_state *new_crtc_state,
7966 				bool enable,
7967 				bool *lock_and_validation_needed)
7968 {
7969 	struct dm_atomic_state *dm_state = NULL;
7970 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7971 	struct dc_stream_state *new_stream;
7972 	int ret = 0;
7973 
7974 	/*
7975 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7976 	 * update changed items
7977 	 */
7978 	struct amdgpu_crtc *acrtc = NULL;
7979 	struct amdgpu_dm_connector *aconnector = NULL;
7980 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7981 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7982 
7983 	new_stream = NULL;
7984 
7985 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7986 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7987 	acrtc = to_amdgpu_crtc(crtc);
7988 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7989 
7990 	/* TODO This hack should go away */
7991 	if (aconnector && enable) {
7992 		/* Make sure fake sink is created in plug-in scenario */
7993 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7994 							    &aconnector->base);
7995 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7996 							    &aconnector->base);
7997 
7998 		if (IS_ERR(drm_new_conn_state)) {
7999 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8000 			goto fail;
8001 		}
8002 
8003 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8004 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8005 
8006 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8007 			goto skip_modeset;
8008 
8009 		new_stream = create_validate_stream_for_sink(aconnector,
8010 							     &new_crtc_state->mode,
8011 							     dm_new_conn_state,
8012 							     dm_old_crtc_state->stream);
8013 
8014 		/*
8015 		 * We can have no stream on ACTION_SET if a display
8016 		 * was disconnected during S3; in this case it is not an
8017 		 * error, the OS will be updated after detection and
8018 		 * will do the right thing on the next atomic commit.
8019 		 */
8020 
8021 		if (!new_stream) {
8022 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8023 					__func__, acrtc->base.base.id);
8024 			ret = -ENOMEM;
8025 			goto fail;
8026 		}
8027 
8028 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8029 
8030 		ret = fill_hdr_info_packet(drm_new_conn_state,
8031 					   &new_stream->hdr_static_metadata);
8032 		if (ret)
8033 			goto fail;
8034 
8035 		/*
8036 		 * If we already removed the old stream from the context
8037 		 * (and set the new stream to NULL) then we can't reuse
8038 		 * the old stream even if the stream and scaling are unchanged.
8039 		 * We'll hit the BUG_ON and black screen.
8040 		 *
8041 		 * TODO: Refactor this function to allow this check to work
8042 		 * in all conditions.
8043 		 */
8044 		if (dm_new_crtc_state->stream &&
8045 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8046 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8047 			new_crtc_state->mode_changed = false;
8048 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
8049 					 new_crtc_state->mode_changed);
8050 		}
8051 	}
8052 
8053 	/* mode_changed flag may get updated above, need to check again */
8054 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8055 		goto skip_modeset;
8056 
8057 	DRM_DEBUG_DRIVER(
8058 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8059 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8060 		"connectors_changed:%d\n",
8061 		acrtc->crtc_id,
8062 		new_crtc_state->enable,
8063 		new_crtc_state->active,
8064 		new_crtc_state->planes_changed,
8065 		new_crtc_state->mode_changed,
8066 		new_crtc_state->active_changed,
8067 		new_crtc_state->connectors_changed);
8068 
8069 	/* Remove stream for any changed/disabled CRTC */
8070 	if (!enable) {
8071 
8072 		if (!dm_old_crtc_state->stream)
8073 			goto skip_modeset;
8074 
8075 		ret = dm_atomic_get_state(state, &dm_state);
8076 		if (ret)
8077 			goto fail;
8078 
8079 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8080 				crtc->base.id);
8081 
8082 		/* i.e. reset mode */
8083 		if (dc_remove_stream_from_ctx(
8084 				dm->dc,
8085 				dm_state->context,
8086 				dm_old_crtc_state->stream) != DC_OK) {
8087 			ret = -EINVAL;
8088 			goto fail;
8089 		}
8090 
8091 		dc_stream_release(dm_old_crtc_state->stream);
8092 		dm_new_crtc_state->stream = NULL;
8093 
8094 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8095 
8096 		*lock_and_validation_needed = true;
8097 
8098 	} else {/* Add stream for any updated/enabled CRTC */
8099 		/*
8100 		 * Quick fix to prevent a NULL pointer dereference on new_stream when added
8101 		 * MST connectors are not found in the existing crtc_state in chained mode.
8102 		 * TODO: need to dig out the root cause of this.
8103 		 */
8104 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8105 			goto skip_modeset;
8106 
8107 		if (modereset_required(new_crtc_state))
8108 			goto skip_modeset;
8109 
8110 		if (modeset_required(new_crtc_state, new_stream,
8111 				     dm_old_crtc_state->stream)) {
8112 
8113 			WARN_ON(dm_new_crtc_state->stream);
8114 
8115 			ret = dm_atomic_get_state(state, &dm_state);
8116 			if (ret)
8117 				goto fail;
8118 
8119 			dm_new_crtc_state->stream = new_stream;
8120 
8121 			dc_stream_retain(new_stream);
8122 
8123 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8124 						crtc->base.id);
8125 
8126 			if (dc_add_stream_to_ctx(
8127 					dm->dc,
8128 					dm_state->context,
8129 					dm_new_crtc_state->stream) != DC_OK) {
8130 				ret = -EINVAL;
8131 				goto fail;
8132 			}
8133 
8134 			*lock_and_validation_needed = true;
8135 		}
8136 	}
8137 
8138 skip_modeset:
8139 	/* Release extra reference */
8140 	if (new_stream)
8141 		dc_stream_release(new_stream);
8142 
8143 	/*
8144 	 * We want to do dc stream updates that do not require a
8145 	 * full modeset below.
8146 	 */
8147 	if (!(enable && aconnector && new_crtc_state->active))
8148 		return 0;
8149 	/*
8150 	 * Given above conditions, the dc state cannot be NULL because:
8151 	 * 1. We're in the process of enabling CRTCs (just been added
8152 	 *    to the dc context, or already is on the context)
8153 	 * 2. Has a valid connector attached, and
8154 	 * 3. Is currently active and enabled.
8155 	 * => The dc stream state currently exists.
8156 	 */
8157 	BUG_ON(dm_new_crtc_state->stream == NULL);
8158 
8159 	/* Scaling or underscan settings */
8160 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8161 		update_stream_scaling_settings(
8162 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8163 
8164 	/* ABM settings */
8165 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8166 
8167 	/*
8168 	 * Color management settings. We also update color properties
8169 	 * when a modeset is needed, to ensure it gets reprogrammed.
8170 	 */
8171 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8172 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8173 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8174 		if (ret)
8175 			goto fail;
8176 	}
8177 
8178 	/* Update Freesync settings. */
8179 	get_freesync_config_for_crtc(dm_new_crtc_state,
8180 				     dm_new_conn_state);
8181 
8182 	return ret;
8183 
8184 fail:
8185 	if (new_stream)
8186 		dc_stream_release(new_stream);
8187 	return ret;
8188 }
8189 
8190 static bool should_reset_plane(struct drm_atomic_state *state,
8191 			       struct drm_plane *plane,
8192 			       struct drm_plane_state *old_plane_state,
8193 			       struct drm_plane_state *new_plane_state)
8194 {
8195 	struct drm_plane *other;
8196 	struct drm_plane_state *old_other_state, *new_other_state;
8197 	struct drm_crtc_state *new_crtc_state;
8198 	int i;
8199 
8200 	/*
8201 	 * TODO: Remove this hack once the checks below are sufficient
8202 	 * to determine when we need to reset all the planes on
8203 	 * the stream.
8204 	 */
8205 	if (state->allow_modeset)
8206 		return true;
8207 
8208 	/* Exit early if we know that we're adding or removing the plane. */
8209 	if (old_plane_state->crtc != new_plane_state->crtc)
8210 		return true;
8211 
8212 	/* old crtc == new_crtc == NULL, plane not in context. */
8213 	if (!new_plane_state->crtc)
8214 		return false;
8215 
8216 	new_crtc_state =
8217 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8218 
8219 	if (!new_crtc_state)
8220 		return true;
8221 
8222 	/* CRTC Degamma changes currently require us to recreate planes. */
8223 	if (new_crtc_state->color_mgmt_changed)
8224 		return true;
8225 
8226 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8227 		return true;
8228 
8229 	/*
8230 	 * If there are any new primary or overlay planes being added or
8231 	 * removed then the z-order can potentially change. To ensure
8232 	 * correct z-order and pipe acquisition the current DC architecture
8233 	 * requires us to remove and recreate all existing planes.
8234 	 *
8235 	 * TODO: Come up with a more elegant solution for this.
8236 	 */
8237 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8238 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8239 			continue;
8240 
8241 		if (old_other_state->crtc != new_plane_state->crtc &&
8242 		    new_other_state->crtc != new_plane_state->crtc)
8243 			continue;
8244 
8245 		if (old_other_state->crtc != new_other_state->crtc)
8246 			return true;
8247 
8248 		/* TODO: Remove this once we can handle fast format changes. */
8249 		if (old_other_state->fb && new_other_state->fb &&
8250 		    old_other_state->fb->format != new_other_state->fb->format)
8251 			return true;
8252 	}
8253 
8254 	return false;
8255 }
8256 
8257 static int dm_update_plane_state(struct dc *dc,
8258 				 struct drm_atomic_state *state,
8259 				 struct drm_plane *plane,
8260 				 struct drm_plane_state *old_plane_state,
8261 				 struct drm_plane_state *new_plane_state,
8262 				 bool enable,
8263 				 bool *lock_and_validation_needed)
8264 {
8265 
8266 	struct dm_atomic_state *dm_state = NULL;
8267 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8268 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8269 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8270 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8271 	struct amdgpu_crtc *new_acrtc;
8272 	bool needs_reset;
8273 	int ret = 0;
8274 
8275 
8276 	new_plane_crtc = new_plane_state->crtc;
8277 	old_plane_crtc = old_plane_state->crtc;
8278 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8279 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8280 
8281 	/* TODO: Implement a better atomic check for the cursor plane */
8282 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8283 		if (!enable || !new_plane_crtc ||
8284 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8285 			return 0;
8286 
8287 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8288 
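		/*
		 * Reject cursor sizes larger than what the hardware cursor on
		 * this CRTC supports.
		 */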
8289 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8290 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8291 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8292 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8293 			return -EINVAL;
8294 		}
8295 
8296 		return 0;
8297 	}
8298 
8299 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8300 					 new_plane_state);
8301 
8302 	/* Remove any changed/removed planes */
8303 	if (!enable) {
8304 		if (!needs_reset)
8305 			return 0;
8306 
8307 		if (!old_plane_crtc)
8308 			return 0;
8309 
8310 		old_crtc_state = drm_atomic_get_old_crtc_state(
8311 				state, old_plane_crtc);
8312 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8313 
8314 		if (!dm_old_crtc_state->stream)
8315 			return 0;
8316 
8317 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8318 				plane->base.id, old_plane_crtc->base.id);
8319 
8320 		ret = dm_atomic_get_state(state, &dm_state);
8321 		if (ret)
8322 			return ret;
8323 
8324 		if (!dc_remove_plane_from_context(
8325 				dc,
8326 				dm_old_crtc_state->stream,
8327 				dm_old_plane_state->dc_state,
8328 				dm_state->context)) {
8329 
8330 			ret = -EINVAL;
8331 			return ret;
8332 		}
8333 
8334 
8335 		dc_plane_state_release(dm_old_plane_state->dc_state);
8336 		dm_new_plane_state->dc_state = NULL;
8337 
8338 		*lock_and_validation_needed = true;
8339 
8340 	} else { /* Add new planes */
8341 		struct dc_plane_state *dc_new_plane_state;
8342 
8343 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8344 			return 0;
8345 
8346 		if (!new_plane_crtc)
8347 			return 0;
8348 
8349 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8350 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8351 
8352 		if (!dm_new_crtc_state->stream)
8353 			return 0;
8354 
8355 		if (!needs_reset)
8356 			return 0;
8357 
8358 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8359 		if (ret)
8360 			return ret;
8361 
8362 		WARN_ON(dm_new_plane_state->dc_state);
8363 
8364 		dc_new_plane_state = dc_create_plane_state(dc);
8365 		if (!dc_new_plane_state)
8366 			return -ENOMEM;
8367 
8368 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8369 				plane->base.id, new_plane_crtc->base.id);
8370 
8371 		ret = fill_dc_plane_attributes(
8372 			new_plane_crtc->dev->dev_private,
8373 			dc_new_plane_state,
8374 			new_plane_state,
8375 			new_crtc_state);
8376 		if (ret) {
8377 			dc_plane_state_release(dc_new_plane_state);
8378 			return ret;
8379 		}
8380 
8381 		ret = dm_atomic_get_state(state, &dm_state);
8382 		if (ret) {
8383 			dc_plane_state_release(dc_new_plane_state);
8384 			return ret;
8385 		}
8386 
8387 		/*
8388 		 * Any atomic check errors that occur after this will
8389 		 * not need a release. The plane state will be attached
8390 		 * to the stream, and therefore part of the atomic
8391 		 * state. It'll be released when the atomic state is
8392 		 * cleaned.
8393 		 */
8394 		if (!dc_add_plane_to_context(
8395 				dc,
8396 				dm_new_crtc_state->stream,
8397 				dc_new_plane_state,
8398 				dm_state->context)) {
8399 
8400 			dc_plane_state_release(dc_new_plane_state);
8401 			return -EINVAL;
8402 		}
8403 
8404 		dm_new_plane_state->dc_state = dc_new_plane_state;
8405 
8406 		/* Tell DC to do a full surface update every time there
8407 		 * is a plane change. Inefficient, but works for now.
8408 		 */
8409 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8410 
8411 		*lock_and_validation_needed = true;
8412 	}
8413 
8414 
8415 	return ret;
8416 }
8417 
8418 static int
8419 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8420 				    struct drm_atomic_state *state,
8421 				    enum surface_update_type *out_type)
8422 {
8423 	struct dc *dc = dm->dc;
8424 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8425 	int i, j, num_plane, ret = 0;
8426 	struct drm_plane_state *old_plane_state, *new_plane_state;
8427 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8428 	struct drm_crtc *new_plane_crtc;
8429 	struct drm_plane *plane;
8430 
8431 	struct drm_crtc *crtc;
8432 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8433 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8434 	struct dc_stream_status *status = NULL;
8435 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8436 	struct surface_info_bundle {
8437 		struct dc_surface_update surface_updates[MAX_SURFACES];
8438 		struct dc_plane_info plane_infos[MAX_SURFACES];
8439 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8440 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8441 		struct dc_stream_update stream_update;
8442 	} *bundle;
8443 
8444 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8445 
8446 	if (!bundle) {
8447 		DRM_ERROR("Failed to allocate update bundle\n");
8448 		/* Set type to FULL to avoid crashing in DC*/
8449 		update_type = UPDATE_TYPE_FULL;
8450 		goto cleanup;
8451 	}
8452 
8453 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8454 
8455 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8456 
8457 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8458 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8459 		num_plane = 0;
8460 
8461 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8462 			update_type = UPDATE_TYPE_FULL;
8463 			goto cleanup;
8464 		}
8465 
8466 		if (!new_dm_crtc_state->stream)
8467 			continue;
8468 
8469 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8470 			const struct amdgpu_framebuffer *amdgpu_fb =
8471 				to_amdgpu_framebuffer(new_plane_state->fb);
8472 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8473 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8474 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8475 			uint64_t tiling_flags;
8476 			bool tmz_surface = false;
8477 
8478 			new_plane_crtc = new_plane_state->crtc;
8479 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8480 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8481 
8482 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8483 				continue;
8484 
8485 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8486 				update_type = UPDATE_TYPE_FULL;
8487 				goto cleanup;
8488 			}
8489 
8490 			if (crtc != new_plane_crtc)
8491 				continue;
8492 
8493 			bundle->surface_updates[num_plane].surface =
8494 					new_dm_plane_state->dc_state;
8495 
8496 			if (new_crtc_state->mode_changed) {
8497 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8498 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8499 			}
8500 
8501 			if (new_crtc_state->color_mgmt_changed) {
8502 				bundle->surface_updates[num_plane].gamma =
8503 						new_dm_plane_state->dc_state->gamma_correction;
8504 				bundle->surface_updates[num_plane].in_transfer_func =
8505 						new_dm_plane_state->dc_state->in_transfer_func;
8506 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8507 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8508 				bundle->stream_update.gamut_remap =
8509 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8510 				bundle->stream_update.output_csc_transform =
8511 						&new_dm_crtc_state->stream->csc_color_matrix;
8512 				bundle->stream_update.out_transfer_func =
8513 						new_dm_crtc_state->stream->out_transfer_func;
8514 			}
8515 
8516 			ret = fill_dc_scaling_info(new_plane_state,
8517 						   scaling_info);
8518 			if (ret)
8519 				goto cleanup;
8520 
8521 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8522 
8523 			if (amdgpu_fb) {
8524 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8525 				if (ret)
8526 					goto cleanup;
8527 
8528 				ret = fill_dc_plane_info_and_addr(
8529 					dm->adev, new_plane_state, tiling_flags,
8530 					plane_info,
8531 					&flip_addr->address, tmz_surface,
8532 					false);
8533 				if (ret)
8534 					goto cleanup;
8535 
8536 				bundle->surface_updates[num_plane].plane_info = plane_info;
8537 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8538 			}
8539 
8540 			num_plane++;
8541 		}
8542 
8543 		if (num_plane == 0)
8544 			continue;
8545 
8546 		ret = dm_atomic_get_state(state, &dm_state);
8547 		if (ret)
8548 			goto cleanup;
8549 
8550 		old_dm_state = dm_atomic_get_old_state(state);
8551 		if (!old_dm_state) {
8552 			ret = -EINVAL;
8553 			goto cleanup;
8554 		}
8555 
8556 		status = dc_stream_get_status_from_state(old_dm_state->context,
8557 							 new_dm_crtc_state->stream);
8558 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8559 		/*
8560 		 * TODO: DC modifies the surface during this call so we need
8561 		 * to lock here - find a way to do this without locking.
8562 		 */
8563 		mutex_lock(&dm->dc_lock);
8564 		update_type = dc_check_update_surfaces_for_stream(
8565 				dc, bundle->surface_updates, num_plane,
8566 				&bundle->stream_update, status);
8567 		mutex_unlock(&dm->dc_lock);
8568 
8569 		if (update_type > UPDATE_TYPE_MED) {
8570 			update_type = UPDATE_TYPE_FULL;
8571 			goto cleanup;
8572 		}
8573 	}
8574 
8575 cleanup:
8576 	kfree(bundle);
8577 
8578 	*out_type = update_type;
8579 	return ret;
8580 }
8581 #if defined(CONFIG_DRM_AMD_DC_DCN)
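/*
 * If a modeset touches a CRTC whose connector sits on a DSC-capable MST
 * topology, every other CRTC on that topology must be pulled into the atomic
 * state as well so that DSC bandwidth can be recomputed for the whole branch.
 */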
8582 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8583 {
8584 	struct drm_connector *connector;
8585 	struct drm_connector_state *conn_state;
8586 	struct amdgpu_dm_connector *aconnector = NULL;
8587 	int i;
8588 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8589 		if (conn_state->crtc != crtc)
8590 			continue;
8591 
8592 		aconnector = to_amdgpu_dm_connector(connector);
8593 		if (!aconnector->port || !aconnector->mst_port)
8594 			aconnector = NULL;
8595 		else
8596 			break;
8597 	}
8598 
8599 	if (!aconnector)
8600 		return 0;
8601 
8602 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8603 }
8604 #endif
8605 
8606 /**
8607  * amdgpu_dm_atomic_check() - Atomic check implementation for the amdgpu DM.
8608  * @dev: The DRM device
8609  * @state: The atomic state to commit
8610  *
8611  * Validate that the given atomic state is programmable by DC into hardware.
8612  * This involves constructing a &struct dc_state reflecting the new hardware
8613  * state we wish to commit, then querying DC to see if it is programmable. It's
8614  * important not to modify the existing DC state. Otherwise, atomic_check
8615  * may unexpectedly commit hardware changes.
8616  *
8617  * When validating the DC state, it's important that the right locks are
8618  * acquired. For a full update, which removes/adds/updates streams on one
8619  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8620  * that any such full-update commit will wait for completion of any
8621  * outstanding flip using DRM's synchronization events. See
8622  * dm_determine_update_type_for_commit().
8623  *
8624  * Note that DM adds the affected connectors for all CRTCs in the state even
8625  * when that might not seem necessary. This is because DC stream creation
8626  * requires the DC sink, which is tied to the DRM connector state. Cleaning
8627  * this up should be possible but non-trivial - a possible TODO item.
8628  *
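 * At a high level, the check (1) runs the DRM core modeset checks, (2) adds
 * affected connectors and planes to the state, (3) processes removed and
 * disabled planes/CRTCs before enabled and added ones, (4) determines the
 * overall update type, and (5) for non-fast updates takes the global lock
 * and asks DC to validate the resulting global state.
 *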
8629  * Return: 0 on success, a negative error code if validation failed.
8630  */
8631 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8632 				  struct drm_atomic_state *state)
8633 {
8634 	struct amdgpu_device *adev = dev->dev_private;
8635 	struct dm_atomic_state *dm_state = NULL;
8636 	struct dc *dc = adev->dm.dc;
8637 	struct drm_connector *connector;
8638 	struct drm_connector_state *old_con_state, *new_con_state;
8639 	struct drm_crtc *crtc;
8640 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8641 	struct drm_plane *plane;
8642 	struct drm_plane_state *old_plane_state, *new_plane_state;
8643 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8644 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8645 	enum dc_status status;
8646 	int ret, i;
8647 
8648 	/*
8649 	 * This bool will be set to true for any modeset/reset
8650 	 * or plane update which implies a non-fast surface update.
8651 	 */
8652 	bool lock_and_validation_needed = false;
8653 
8654 	ret = drm_atomic_helper_check_modeset(dev, state);
8655 	if (ret)
8656 		goto fail;
8657 
8658 #if defined(CONFIG_DRM_AMD_DC_DCN)
8659 	if (adev->asic_type >= CHIP_NAVI10) {
8660 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8661 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8662 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8663 				if (ret)
8664 					goto fail;
8665 			}
8666 		}
8667 	}
8668 #endif
8669 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8670 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8671 		    !new_crtc_state->color_mgmt_changed &&
8672 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8673 			continue;
8674 
8675 		if (!new_crtc_state->enable)
8676 			continue;
8677 
8678 		ret = drm_atomic_add_affected_connectors(state, crtc);
8679 		if (ret)
8680 			goto fail;
8681 
8682 		ret = drm_atomic_add_affected_planes(state, crtc);
8683 		if (ret)
8684 			goto fail;
8685 	}
8686 
8687 	/*
8688 	 * Add all primary and overlay planes on the CRTC to the state
8689 	 * whenever a plane is enabled to maintain correct z-ordering
8690 	 * and to enable fast surface updates.
8691 	 */
8692 	drm_for_each_crtc(crtc, dev) {
8693 		bool modified = false;
8694 
8695 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8696 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8697 				continue;
8698 
8699 			if (new_plane_state->crtc == crtc ||
8700 			    old_plane_state->crtc == crtc) {
8701 				modified = true;
8702 				break;
8703 			}
8704 		}
8705 
8706 		if (!modified)
8707 			continue;
8708 
8709 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8710 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8711 				continue;
8712 
8713 			new_plane_state =
8714 				drm_atomic_get_plane_state(state, plane);
8715 
8716 			if (IS_ERR(new_plane_state)) {
8717 				ret = PTR_ERR(new_plane_state);
8718 				goto fail;
8719 			}
8720 		}
8721 	}
8722 
8723 	/* Remove existing planes if they are modified */
8724 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8725 		ret = dm_update_plane_state(dc, state, plane,
8726 					    old_plane_state,
8727 					    new_plane_state,
8728 					    false,
8729 					    &lock_and_validation_needed);
8730 		if (ret)
8731 			goto fail;
8732 	}
8733 
8734 	/* Disable all CRTCs which require disabling */
8735 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8736 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8737 					   old_crtc_state,
8738 					   new_crtc_state,
8739 					   false,
8740 					   &lock_and_validation_needed);
8741 		if (ret)
8742 			goto fail;
8743 	}
8744 
8745 	/* Enable all crtcs which require enable */
8746 	/* Enable all CRTCs which require enabling */
8747 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8748 					   old_crtc_state,
8749 					   new_crtc_state,
8750 					   true,
8751 					   &lock_and_validation_needed);
8752 		if (ret)
8753 			goto fail;
8754 	}
8755 
8756 	/* Add new/modified planes */
8757 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8758 		ret = dm_update_plane_state(dc, state, plane,
8759 					    old_plane_state,
8760 					    new_plane_state,
8761 					    true,
8762 					    &lock_and_validation_needed);
8763 		if (ret)
8764 			goto fail;
8765 	}
8766 
8767 	/* Run this here since we want to validate the streams we created */
8768 	ret = drm_atomic_helper_check_planes(dev, state);
8769 	if (ret)
8770 		goto fail;
8771 
8772 	if (state->legacy_cursor_update) {
8773 		/*
8774 		 * This is a fast cursor update coming from the plane update
8775 		 * helper, check if it can be done asynchronously for better
8776 		 * performance.
8777 		 */
8778 		state->async_update =
8779 			!drm_atomic_helper_async_check(dev, state);
8780 
8781 		/*
8782 		 * Skip the remaining global validation if this is an async
8783 		 * update. Cursor updates can be done without affecting
8784 		 * state or bandwidth calcs and this avoids the performance
8785 		 * penalty of locking the private state object and
8786 		 * allocating a new dc_state.
8787 		 */
8788 		if (state->async_update)
8789 			return 0;
8790 	}
8791 
8792 	/* Check scaling and underscan changes */
8793 	/* TODO: Scaling changes validation was removed due to the inability to
8794 	 * commit a new stream into the context w/o causing a full reset. We
8795 	 * need to decide how to handle this.
8796 	 */
8797 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8798 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8799 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8800 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8801 
8802 		/* Skip any modesets/resets */
8803 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8804 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8805 			continue;
8806 
8807 		/* Skip anything that is not a scaling or underscan change */
8808 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8809 			continue;
8810 
8811 		overall_update_type = UPDATE_TYPE_FULL;
8812 		lock_and_validation_needed = true;
8813 	}
8814 
8815 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8816 	if (ret)
8817 		goto fail;
8818 
8819 	if (overall_update_type < update_type)
8820 		overall_update_type = update_type;
8821 
8822 	/*
8823 	 * lock_and_validation_needed was the old way to determine whether the
8824 	 * global lock must be taken. It is kept to check that we haven't broken
8825 	 * any corner cases: true implies UPDATE_TYPE_FULL or UPDATE_TYPE_MED,
8826 	 * false implies UPDATE_TYPE_FAST.
8827 	 */
8828 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8829 		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8830 
8831 	if (overall_update_type > UPDATE_TYPE_FAST) {
8832 		ret = dm_atomic_get_state(state, &dm_state);
8833 		if (ret)
8834 			goto fail;
8835 
8836 		ret = do_aquire_global_lock(dev, state);
8837 		if (ret)
8838 			goto fail;
8839 
8840 #if defined(CONFIG_DRM_AMD_DC_DCN)
8841 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8842 			goto fail;
8843 
8844 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8845 		if (ret)
8846 			goto fail;
8847 #endif
8848 
8849 		/*
8850 		 * Perform validation of MST topology in the state:
8851 		 * We need to perform MST atomic check before calling
8852 		 * dc_validate_global_state(), or there is a chance
8853 		 * to get stuck in an infinite loop and hang eventually.
8854 		 */
8855 		ret = drm_dp_mst_atomic_check(state);
8856 		if (ret)
8857 			goto fail;
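		/* The trailing false below is dc_validate_global_state()'s
		 * fast_validate flag: a full, bandwidth-checked validation
		 * is wanted here. */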
8858 		status = dc_validate_global_state(dc, dm_state->context, false);
8859 		if (status != DC_OK) {
8860 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8861 				       dc_status_to_str(status), status);
8862 			ret = -EINVAL;
8863 			goto fail;
8864 		}
8865 	} else {
8866 		/*
8867 		 * The commit is a fast update. Fast updates shouldn't change
8868 		 * the DC context, affect global validation, and can have their
8869 		 * commit work done in parallel with other commits not touching
8870 		 * the same resource. If we have a new DC context as part of
8871 		 * the DM atomic state from validation we need to free it and
8872 		 * retain the existing one instead.
8873 		 */
8874 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8875 
8876 		new_dm_state = dm_atomic_get_new_state(state);
8877 		old_dm_state = dm_atomic_get_old_state(state);
8878 
8879 		if (new_dm_state && old_dm_state) {
8880 			if (new_dm_state->context)
8881 				dc_release_state(new_dm_state->context);
8882 
8883 			new_dm_state->context = old_dm_state->context;
8884 
8885 			if (old_dm_state->context)
8886 				dc_retain_state(old_dm_state->context);
8887 		}
8888 	}
8889 
8890 	/* Store the overall update type for use later in atomic check. */
8891 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8892 		struct dm_crtc_state *dm_new_crtc_state =
8893 			to_dm_crtc_state(new_crtc_state);
8894 
8895 		dm_new_crtc_state->update_type = (int)overall_update_type;
8896 	}
8897 
8898 	/* Must be a success (ret == 0) at this point */
8899 	WARN_ON(ret);
8900 	return ret;
8901 
8902 fail:
8903 	if (ret == -EDEADLK)
8904 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8905 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8906 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8907 	else
8908 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8909 
8910 	return ret;
8911 }
8912 
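/*
 * A sink sets DP_MSA_TIMING_PAR_IGNORED in the DOWN_STREAM_PORT_COUNT DPCD
 * register when it can ignore the MSA timing parameters and follow the actual
 * video timing instead - a prerequisite for variable refresh over DP.
 */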
8913 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8914 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8915 {
8916 	uint8_t dpcd_data;
8917 	bool capable = false;
8918 
8919 	if (amdgpu_dm_connector->dc_link &&
8920 		dm_helpers_dp_read_dpcd(
8921 				NULL,
8922 				amdgpu_dm_connector->dc_link,
8923 				DP_DOWN_STREAM_PORT_COUNT,
8924 				&dpcd_data,
8925 				sizeof(dpcd_data))) {
8926 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8927 	}
8928 
8929 	return capable;
8930 }
8931 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8932 					struct edid *edid)
8933 {
8934 	int i;
8935 	bool edid_check_required;
8936 	struct detailed_timing *timing;
8937 	struct detailed_non_pixel *data;
8938 	struct detailed_data_monitor_range *range;
8939 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8940 			to_amdgpu_dm_connector(connector);
8941 	struct dm_connector_state *dm_con_state = NULL;
8942 
8943 	struct drm_device *dev = connector->dev;
8944 	struct amdgpu_device *adev = dev->dev_private;
8945 	bool freesync_capable = false;
8946 
8947 	if (!connector->state) {
8948 		DRM_ERROR("%s - Connector has no state\n", __func__);
8949 		goto update;
8950 	}
8951 
8952 	if (!edid) {
8953 		dm_con_state = to_dm_connector_state(connector->state);
8954 
8955 		amdgpu_dm_connector->min_vfreq = 0;
8956 		amdgpu_dm_connector->max_vfreq = 0;
8957 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8958 
8959 		goto update;
8960 	}
8961 
8962 	dm_con_state = to_dm_connector_state(connector->state);
8963 
8964 	edid_check_required = false;
8965 	if (!amdgpu_dm_connector->dc_sink) {
8966 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8967 		goto update;
8968 	}
8969 	if (!adev->dm.freesync_module)
8970 		goto update;
8971 	/*
8972 	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP only.
8973 	 */
8974 	if (edid) {
8975 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8976 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8977 			edid_check_required = is_dp_capable_without_timing_msa(
8978 						adev->dm.dc,
8979 						amdgpu_dm_connector);
8980 		}
8981 	}
8982 	if (edid_check_required && (edid->version > 1 ||
8983 	    (edid->version == 1 && edid->revision > 1))) {
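		/*
		 * EDID versions newer than 1.1 may carry a Display Range
		 * Limits descriptor in one of the four detailed-timing slots;
		 * it holds the min/max vertical refresh the sink supports.
		 */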
8984 		for (i = 0; i < 4; i++) {
8985 
8986 			timing	= &edid->detailed_timings[i];
8987 			data	= &timing->data.other_data;
8988 			range	= &data->data.range;
8989 			/*
8990 			 * Check if monitor has continuous frequency mode
8991 			 */
8992 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8993 				continue;
8994 			/*
8995 			 * Check for range-limits-only flags: if flags == 1,
8996 			 * no additional timing information is provided.
8997 			 * Default GTF, GTF secondary curve and CVT are not
8998 			 * supported.
8999 			 */
9000 			if (range->flags != 1)
9001 				continue;
9002 
9003 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9004 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9005 			amdgpu_dm_connector->pixel_clock_mhz =
9006 				range->pixel_clock_mhz * 10;
9007 			break;
9008 		}
9009 
9010 		if (amdgpu_dm_connector->max_vfreq -
9011 		    amdgpu_dm_connector->min_vfreq > 10)
9012 			freesync_capable = true;
9015 	}
9016 
9017 update:
9018 	if (dm_con_state)
9019 		dm_con_state->freesync_capable = freesync_capable;
9020 
9021 	if (connector->vrr_capable_property)
9022 		drm_connector_set_vrr_capable_property(connector,
9023 						       freesync_capable);
9024 }
9025 
9026 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9027 {
9028 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9029 
9030 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9031 		return;
9032 	if (link->type == dc_connection_none)
9033 		return;
9034 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9035 					dpcd_data, sizeof(dpcd_data))) {
9036 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9037 
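		/* dpcd_data[0] holds the sink's PSR capability version (eDP
		 * DPCD register DP_PSR_SUPPORT); zero means no PSR support. */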
9038 		if (dpcd_data[0] == 0) {
9039 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9040 			link->psr_settings.psr_feature_enabled = false;
9041 		} else {
9042 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9043 			link->psr_settings.psr_feature_enabled = true;
9044 		}
9045 
9046 		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
9047 	}
9048 }
9049 
9050 /*
9051  * amdgpu_dm_link_setup_psr() - configure the PSR link
9052  * @stream: stream state
9053  *
9054  * Return: true on success
9055  */
9056 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9057 {
9058 	struct dc_link *link = NULL;
9059 	struct psr_config psr_config = {0};
9060 	struct psr_context psr_context = {0};
9061 	bool ret = false;
9062 
9063 	if (stream == NULL)
9064 		return false;
9065 
9066 	link = stream->link;
9067 
9068 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9069 
9070 	if (psr_config.psr_version > 0) {
9071 		psr_config.psr_exit_link_training_required = 0x1;
9072 		psr_config.psr_frame_capture_indication_req = 0;
9073 		psr_config.psr_rfb_setup_time = 0x37;
9074 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9075 		psr_config.allow_smu_optimizations = 0x0;
9076 
9077 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9079 	}
9080 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9081 
9082 	return ret;
9083 }
9084 
9085 /*
9086  * amdgpu_dm_psr_enable() - enable the PSR firmware
9087  * @stream: stream state
9088  *
9089  * Return: true on success
9090  */
9091 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9092 {
9093 	struct dc_link *link = stream->link;
9094 	unsigned int vsync_rate_hz = 0;
9095 	struct dc_static_screen_params params = {0};
9096 	/* Calculate number of static frames before generating interrupt to
9097 	 * enter PSR.
9098 	 */
9099 	/* Init with a failsafe of 2 static frames */
9100 	unsigned int num_frames_static = 2;
9101 
9102 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9103 
9104 	vsync_rate_hz = div64_u64(div64_u64((
9105 			stream->timing.pix_clk_100hz * 100),
9106 			stream->timing.v_total),
9107 			stream->timing.h_total);
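	/* i.e. refresh rate = pixel clock / (h_total * v_total);
	 * pix_clk_100hz is in units of 100 Hz, hence the factor of 100. */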
9108 
9109 	/*
9110 	 * Round up: calculate the number of frames such that at least
9111 	 * 30 ms of time has passed.
9112 	 */
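	/*
	 * Worked example: at a 60 Hz vsync rate, frame_time_microsec =
	 * 1000000 / 60 = 16666, so num_frames_static = 30000 / 16666 + 1 = 2.
	 */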
9113 	if (vsync_rate_hz != 0) {
9114 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9115 		num_frames_static = (30000 / frame_time_microsec) + 1;
9116 	}
9117 
9118 	params.triggers.cursor_update = true;
9119 	params.triggers.overlay_update = true;
9120 	params.triggers.surface_update = true;
9121 	params.num_frames = num_frames_static;
9122 
9123 	dc_stream_set_static_screen_params(link->ctx->dc,
9124 					   &stream, 1,
9125 					   &params);
9126 
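	/* allow_active = true arms PSR; the final argument is the wait flag,
	 * and only the disable path waits for the transition to complete. */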
9127 	return dc_link_set_psr_allow_active(link, true, false);
9128 }
9129 
9130 /*
9131  * amdgpu_dm_psr_disable() - disable the PSR firmware
9132  * @stream: stream state
9133  *
9134  * Return: true on success
9135  */
9136 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9137 {
9139 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9140 
9141 	return dc_link_set_psr_allow_active(stream->link, false, true);
9142 }
9143