1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
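/*
 * Background on the idiom above (a general kernel tracing note, not
 * specific to this driver): CREATE_TRACE_POINTS must be defined in
 * exactly one compilation unit before the corresponding trace header is
 * included, so the tracepoint definitions are emitted there; every other
 * user includes the trace header without it and only sees declarations.
 */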
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
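
/*
 * A compact instance of this translation can be seen in
 * dm_vblank_get_counter() below: a DRM-facing request (the vblank
 * counter of a CRTC) is satisfied by unwrapping the dm-private CRTC
 * state and forwarding it to the DC equivalent,
 * dc_stream_get_vblank_counter().
 */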
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
/*
 * Initializes drm_device display-related structures, based on the
 * information provided by DAL. The DRM structures are: drm_crtc,
 * drm_connector, drm_encoder, and drm_mode_config.
 *
 * Returns 0 on success.
 */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the DRM structures created by the above function. */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];
	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc_state->stream);
}
202 
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];
	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO: rework base driver to use values directly.
	 * For now parse it back into reg-format.
	 */
	dc_stream_get_scanoutpos(acrtc_state->stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters (amdgpu device and IRQ source)
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
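	/*
	 * Pageflip IRQ sources are laid out contiguously per display
	 * instance, so subtracting the base pageflip IRQ type recovers
	 * the OTG instance this interrupt fired for.
	 */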
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
310 	/* IRQ could occur when in initial stage */
311 	/* TODO work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
	WARN_ON(!e);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as it is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is
	 * enabled.
	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results at that point. This also delivers any
		 * page-flip completion events that were queued to us while a
		 * pageflip happened inside front-porch.
		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: used for determining the CRTC instance
445  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 	if (!acrtc)
459 		return;
460 
461 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
462 
463 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
464 			 amdgpu_dm_vrr_active(acrtc_state),
465 			 acrtc_state->active_planes);
466 
	/*
	 * Core vblank handling at the start of front-porch is only possible
	 * in non-VRR mode, as only there does vblank timestamping give valid
	 * results while still inside front-porch. Otherwise defer it to
	 * dm_vupdate_high_irq() after the end of front-porch.
	 */
473 	if (!amdgpu_dm_vrr_active(acrtc_state))
474 		drm_crtc_handle_vblank(&acrtc->base);
475 
	/*
	 * The following must happen at the start of vblank, for CRC
	 * computation and below-the-range (BTR) support in VRR mode.
	 */
480 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
481 
482 	/* BTR updates need to happen before VUPDATE on Vega and above. */
483 	if (adev->family < AMDGPU_FAMILY_AI)
484 		return;
485 
486 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
487 
488 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
489 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
490 		mod_freesync_handle_v_update(adev->dm.freesync_module,
491 					     acrtc_state->stream,
492 					     &acrtc_state->vrr_params);
493 
494 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
495 					   &acrtc_state->vrr_params.adjust);
496 	}
497 
498 	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
500 	 * In that case, pageflip completion interrupts won't fire and pageflip
501 	 * completion events won't get delivered. Prevent this by sending
502 	 * pending pageflip events from here if a flip is still pending.
503 	 *
504 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
505 	 * avoid race conditions between flip programming and completion,
506 	 * which could cause too early flip completion events.
507 	 */
508 	if (adev->family >= AMDGPU_FAMILY_RV &&
509 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
510 	    acrtc_state->active_planes == 0) {
511 		if (acrtc->event) {
512 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
513 			acrtc->event = NULL;
514 			drm_crtc_vblank_put(&acrtc->base);
515 		}
516 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
517 	}
518 
519 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
520 }
521 
522 static int dm_set_clockgating_state(void *handle,
523 		  enum amd_clockgating_state state)
524 {
525 	return 0;
526 }
527 
528 static int dm_set_powergating_state(void *handle,
529 		  enum amd_powergating_state state)
530 {
531 	return 0;
532 }
533 
534 /* Prototypes of private functions */
static int dm_early_init(void *handle);
536 
/* Allocate memory for FBC compressed data */
538 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
539 {
540 	struct drm_device *dev = connector->dev;
541 	struct amdgpu_device *adev = dev->dev_private;
542 	struct dm_comressor_info *compressor = &adev->dm.compressor;
543 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
544 	struct drm_display_mode *mode;
545 	unsigned long max_size = 0;
546 
547 	if (adev->dm.dc->fbc_compressor == NULL)
548 		return;
549 
550 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
551 		return;
552 
553 	if (compressor->bo_ptr)
554 		return;
555 
557 	list_for_each_entry(mode, &connector->modes, head) {
558 		if (max_size < mode->htotal * mode->vtotal)
559 			max_size = mode->htotal * mode->vtotal;
560 	}
561 
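	/*
	 * max_size is the largest mode area in pixels; the allocation below
	 * is sized at 4 bytes per pixel of that mode.
	 */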
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
577 
578 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
579 					  int pipe, bool *enabled,
580 					  unsigned char *buf, int max_bytes)
581 {
582 	struct drm_device *dev = dev_get_drvdata(kdev);
583 	struct amdgpu_device *adev = dev->dev_private;
584 	struct drm_connector *connector;
585 	struct drm_connector_list_iter conn_iter;
586 	struct amdgpu_dm_connector *aconnector;
587 	int ret = 0;
588 
589 	*enabled = false;
590 
591 	mutex_lock(&adev->dm.audio_lock);
592 
593 	drm_connector_list_iter_begin(dev, &conn_iter);
594 	drm_for_each_connector_iter(connector, &conn_iter) {
595 		aconnector = to_amdgpu_dm_connector(connector);
596 		if (aconnector->audio_inst != port)
597 			continue;
598 
599 		*enabled = true;
600 		ret = drm_eld_size(connector->eld);
601 		memcpy(buf, connector->eld, min(max_bytes, ret));
602 
603 		break;
604 	}
605 	drm_connector_list_iter_end(&conn_iter);
606 
607 	mutex_unlock(&adev->dm.audio_lock);
608 
609 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
610 
611 	return ret;
612 }
613 
614 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
615 	.get_eld = amdgpu_dm_audio_component_get_eld,
616 };
617 
618 static int amdgpu_dm_audio_component_bind(struct device *kdev,
619 				       struct device *hda_kdev, void *data)
620 {
621 	struct drm_device *dev = dev_get_drvdata(kdev);
622 	struct amdgpu_device *adev = dev->dev_private;
623 	struct drm_audio_component *acomp = data;
624 
625 	acomp->ops = &amdgpu_dm_audio_component_ops;
626 	acomp->dev = kdev;
627 	adev->dm.audio_component = acomp;
628 
629 	return 0;
630 }
631 
632 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
633 					  struct device *hda_kdev, void *data)
634 {
635 	struct drm_device *dev = dev_get_drvdata(kdev);
636 	struct amdgpu_device *adev = dev->dev_private;
637 	struct drm_audio_component *acomp = data;
638 
639 	acomp->ops = NULL;
640 	acomp->dev = NULL;
641 	adev->dm.audio_component = NULL;
642 }
643 
644 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
645 	.bind	= amdgpu_dm_audio_component_bind,
646 	.unbind	= amdgpu_dm_audio_component_unbind,
647 };
648 
649 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
650 {
651 	int i, ret;
652 
653 	if (!amdgpu_audio)
654 		return 0;
655 
656 	adev->mode_info.audio.enabled = true;
657 
658 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
659 
660 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
661 		adev->mode_info.audio.pin[i].channels = -1;
662 		adev->mode_info.audio.pin[i].rate = -1;
663 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
664 		adev->mode_info.audio.pin[i].status_bits = 0;
665 		adev->mode_info.audio.pin[i].category_code = 0;
666 		adev->mode_info.audio.pin[i].connected = false;
667 		adev->mode_info.audio.pin[i].id =
668 			adev->dm.dc->res_pool->audios[i]->inst;
669 		adev->mode_info.audio.pin[i].offset = 0;
670 	}
671 
672 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
673 	if (ret < 0)
674 		return ret;
675 
676 	adev->dm.audio_registered = true;
677 
678 	return 0;
679 }
680 
681 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
682 {
683 	if (!amdgpu_audio)
684 		return;
685 
686 	if (!adev->mode_info.audio.enabled)
687 		return;
688 
689 	if (adev->dm.audio_registered) {
690 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
691 		adev->dm.audio_registered = false;
692 	}
693 
694 	/* TODO: Disable audio? */
695 
696 	adev->mode_info.audio.enabled = false;
697 }
698 
699 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
700 {
701 	struct drm_audio_component *acomp = adev->dm.audio_component;
702 
703 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
704 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
705 
706 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
707 						 pin, -1);
708 	}
709 }
710 
711 static int dm_dmub_hw_init(struct amdgpu_device *adev)
712 {
713 	const struct dmcub_firmware_header_v1_0 *hdr;
714 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
715 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
716 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
717 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
718 	struct abm *abm = adev->dm.dc->res_pool->abm;
719 	struct dmub_srv_hw_params hw_params;
720 	enum dmub_status status;
721 	const unsigned char *fw_inst_const, *fw_bss_data;
722 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
723 	bool has_hw_support;
724 
725 	if (!dmub_srv)
726 		/* DMUB isn't supported on the ASIC. */
727 		return 0;
728 
729 	if (!fb_info) {
730 		DRM_ERROR("No framebuffer info for DMUB service.\n");
731 		return -EINVAL;
732 	}
733 
734 	if (!dmub_fw) {
735 		/* Firmware required for DMUB support. */
736 		DRM_ERROR("No firmware provided for DMUB.\n");
737 		return -EINVAL;
738 	}
739 
740 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
741 	if (status != DMUB_STATUS_OK) {
742 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
743 		return -EINVAL;
744 	}
745 
746 	if (!has_hw_support) {
747 		DRM_INFO("DMUB unsupported on ASIC\n");
748 		return 0;
749 	}
750 
751 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
752 
753 	fw_inst_const = dmub_fw->data +
754 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
755 			PSP_HEADER_BYTES;
756 
757 	fw_bss_data = dmub_fw->data +
758 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
759 		      le32_to_cpu(hdr->inst_const_bytes);
760 
761 	/* Copy firmware and bios info into FB memory. */
762 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
763 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
764 
765 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
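
	/*
	 * Illustrative layout of the ucode array as implied by the offsets
	 * above (region sizes come from the firmware header; the PSP_* sizes
	 * are defined at the top of this file):
	 *
	 *   ucode_array_offset_bytes --> +----------------------+
	 *                                | PSP header (0x100)   |
	 *          fw_inst_const  -->    +----------------------+
	 *                                | inst_const payload   | fw_inst_const_size
	 *                                +----------------------+
	 *                                | PSP footer (0x100)   |
	 *          fw_bss_data    -->    +----------------------+
	 *                                | bss/data             | fw_bss_data_size
	 *                                +----------------------+
	 */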
766 
	/*
	 * If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to CW0; otherwise, the firmware is back-door loaded
	 * here by dm_dmub_hw_init.
	 */
772 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
773 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
774 				fw_inst_const_size);
775 	}
776 
777 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
778 	       fw_bss_data_size);
779 
780 	/* Copy firmware bios info into FB memory. */
781 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
782 	       adev->bios_size);
783 
784 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
787 
788 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
789 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
790 
791 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
792 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
793 
794 	/* Initialize hardware. */
795 	memset(&hw_params, 0, sizeof(hw_params));
796 	hw_params.fb_base = adev->gmc.fb_start;
797 	hw_params.fb_offset = adev->gmc.aper_base;
798 
799 	/* backdoor load firmware and trigger dmub running */
800 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
801 		hw_params.load_inst_const = true;
802 
803 	if (dmcu)
804 		hw_params.psp_version = dmcu->psp_version;
805 
806 	for (i = 0; i < fb_info->num_fb; ++i)
807 		hw_params.fb[i] = &fb_info->fb[i];
808 
809 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
810 	if (status != DMUB_STATUS_OK) {
811 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
812 		return -EINVAL;
813 	}
814 
815 	/* Wait for firmware load to finish. */
816 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
817 	if (status != DMUB_STATUS_OK)
818 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
819 
820 	/* Init DMCU and ABM if available. */
821 	if (dmcu && abm) {
822 		dmcu->funcs->dmcu_init(dmcu);
823 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
824 	}
825 
826 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
827 	if (!adev->dm.dc->ctx->dmub_srv) {
828 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
829 		return -ENOMEM;
830 	}
831 
832 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
833 		 adev->dm.dmcub_fw_version);
834 
835 	return 0;
836 }
837 
838 static int amdgpu_dm_init(struct amdgpu_device *adev)
839 {
840 	struct dc_init_data init_data;
841 #ifdef CONFIG_DRM_AMD_DC_HDCP
842 	struct dc_callback_init init_params;
843 #endif
844 	int r;
845 
846 	adev->dm.ddev = adev->ddev;
847 	adev->dm.adev = adev;
848 
849 	/* Zero all the fields */
850 	memset(&init_data, 0, sizeof(init_data));
851 #ifdef CONFIG_DRM_AMD_DC_HDCP
852 	memset(&init_params, 0, sizeof(init_params));
853 #endif
854 
855 	mutex_init(&adev->dm.dc_lock);
856 	mutex_init(&adev->dm.audio_lock);
857 
	if (amdgpu_dm_irq_init(adev)) {
859 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
860 		goto error;
861 	}
862 
863 	init_data.asic_id.chip_family = adev->family;
864 
865 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
866 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
867 
868 	init_data.asic_id.vram_width = adev->gmc.vram_width;
869 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
870 	init_data.asic_id.atombios_base_address =
871 		adev->mode_info.atom_context->bios;
872 
873 	init_data.driver = adev;
874 
875 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
876 
877 	if (!adev->dm.cgs_device) {
878 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
879 		goto error;
880 	}
881 
882 	init_data.cgs_device = adev->dm.cgs_device;
883 
884 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
885 
886 	switch (adev->asic_type) {
887 	case CHIP_CARRIZO:
888 	case CHIP_STONEY:
889 	case CHIP_RAVEN:
890 	case CHIP_RENOIR:
891 		init_data.flags.gpu_vm_support = true;
892 		break;
893 	default:
894 		break;
895 	}
896 
897 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
898 		init_data.flags.fbc_support = true;
899 
900 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
901 		init_data.flags.multi_mon_pp_mclk_switch = true;
902 
903 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
904 		init_data.flags.disable_fractional_pwm = true;
905 
906 	init_data.flags.power_down_display_on_boot = true;
907 
908 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
909 
910 	/* Display Core create. */
911 	adev->dm.dc = dc_create(&init_data);
912 
	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}
919 
920 	r = dm_dmub_hw_init(adev);
921 	if (r) {
922 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
923 		goto error;
924 	}
925 
926 	dc_hardware_init(adev->dm.dc);
927 
928 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
935 
936 	amdgpu_dm_init_color_mod();
937 
938 #ifdef CONFIG_DRM_AMD_DC_HDCP
939 	if (adev->asic_type >= CHIP_RAVEN) {
940 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
941 
942 		if (!adev->dm.hdcp_workqueue)
943 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
944 		else
945 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
946 
947 		dc_init_callbacks(adev->dm.dc, &init_params);
948 	}
949 #endif
950 	if (amdgpu_dm_initialize_drm_device(adev)) {
951 		DRM_ERROR(
952 		"amdgpu: failed to initialize sw for display support.\n");
953 		goto error;
954 	}
955 
	/* Update the actually used number of CRTCs */
957 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
958 
959 	/* TODO: Add_display_info? */
960 
961 	/* TODO use dynamic cursor width */
962 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
963 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
964 
	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
970 
971 	DRM_DEBUG_DRIVER("KMS initialized.\n");
972 
973 	return 0;
974 error:
975 	amdgpu_dm_fini(adev);
976 
977 	return -EINVAL;
978 }
979 
980 static void amdgpu_dm_fini(struct amdgpu_device *adev)
981 {
982 	amdgpu_dm_audio_fini(adev);
983 
984 	amdgpu_dm_destroy_drm_device(&adev->dm);
985 
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 	if (adev->dm.hdcp_workqueue) {
988 		hdcp_destroy(adev->dm.hdcp_workqueue);
989 		adev->dm.hdcp_workqueue = NULL;
990 	}
991 
992 	if (adev->dm.dc)
993 		dc_deinit_callbacks(adev->dm.dc);
994 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
996 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
997 		adev->dm.dc->ctx->dmub_srv = NULL;
998 	}
999 
1000 	if (adev->dm.dmub_bo)
1001 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1002 				      &adev->dm.dmub_bo_gpu_addr,
1003 				      &adev->dm.dmub_bo_cpu_addr);
1004 
1005 	/* DC Destroy TODO: Replace destroy DAL */
1006 	if (adev->dm.dc)
1007 		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */
1013 
1014 	if (adev->dm.cgs_device) {
1015 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1016 		adev->dm.cgs_device = NULL;
1017 	}
1018 	if (adev->dm.freesync_module) {
1019 		mod_freesync_destroy(adev->dm.freesync_module);
1020 		adev->dm.freesync_module = NULL;
1021 	}
1022 
1023 	mutex_destroy(&adev->dm.audio_lock);
1024 	mutex_destroy(&adev->dm.dc_lock);
1027 }
1028 
1029 static int load_dmcu_fw(struct amdgpu_device *adev)
1030 {
1031 	const char *fw_name_dmcu = NULL;
1032 	int r;
1033 	const struct dmcu_firmware_header_v1_0 *hdr;
1034 
	switch (adev->asic_type) {
1036 	case CHIP_BONAIRE:
1037 	case CHIP_HAWAII:
1038 	case CHIP_KAVERI:
1039 	case CHIP_KABINI:
1040 	case CHIP_MULLINS:
1041 	case CHIP_TONGA:
1042 	case CHIP_FIJI:
1043 	case CHIP_CARRIZO:
1044 	case CHIP_STONEY:
1045 	case CHIP_POLARIS11:
1046 	case CHIP_POLARIS10:
1047 	case CHIP_POLARIS12:
1048 	case CHIP_VEGAM:
1049 	case CHIP_VEGA10:
1050 	case CHIP_VEGA12:
1051 	case CHIP_VEGA20:
1052 	case CHIP_NAVI10:
1053 	case CHIP_NAVI14:
1054 	case CHIP_RENOIR:
1055 		return 0;
1056 	case CHIP_NAVI12:
1057 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1058 		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
1067 	default:
1068 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1069 		return -EINVAL;
1070 	}
1071 
1072 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1073 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1074 		return 0;
1075 	}
1076 
1077 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1078 	if (r == -ENOENT) {
1079 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1080 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1081 		adev->dm.fw_dmcu = NULL;
1082 		return 0;
1083 	}
1084 	if (r) {
1085 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1086 			fw_name_dmcu);
1087 		return r;
1088 	}
1089 
1090 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1091 	if (r) {
1092 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1093 			fw_name_dmcu);
1094 		release_firmware(adev->dm.fw_dmcu);
1095 		adev->dm.fw_dmcu = NULL;
1096 		return r;
1097 	}
1098 
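	/*
	 * For PSP loading, the DMCU image is registered as two ucode
	 * entries: the ERAM portion (total ucode size minus the interrupt
	 * vectors) and the interrupt vector (INTV) portion, each sized
	 * below from the firmware header.
	 */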
1099 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1100 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1101 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1102 	adev->firmware.fw_size +=
1103 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1104 
1105 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1106 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1107 	adev->firmware.fw_size +=
1108 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1109 
1110 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1111 
1112 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1113 
1114 	return 0;
1115 }
1116 
1117 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1118 {
1119 	struct amdgpu_device *adev = ctx;
1120 
1121 	return dm_read_reg(adev->dm.dc->ctx, address);
1122 }
1123 
1124 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1125 				     uint32_t value)
1126 {
1127 	struct amdgpu_device *adev = ctx;
1128 
1129 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1130 }
1131 
1132 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1133 {
1134 	struct dmub_srv_create_params create_params;
1135 	struct dmub_srv_region_params region_params;
1136 	struct dmub_srv_region_info region_info;
1137 	struct dmub_srv_fb_params fb_params;
1138 	struct dmub_srv_fb_info *fb_info;
1139 	struct dmub_srv *dmub_srv;
1140 	const struct dmcub_firmware_header_v1_0 *hdr;
1141 	const char *fw_name_dmub;
1142 	enum dmub_asic dmub_asic;
1143 	enum dmub_status status;
1144 	int r;
1145 
1146 	switch (adev->asic_type) {
1147 	case CHIP_RENOIR:
1148 		dmub_asic = DMUB_ASIC_DCN21;
1149 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1150 		break;
1151 
1152 	default:
1153 		/* ASIC doesn't support DMUB. */
1154 		return 0;
1155 	}
1156 
1157 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1158 	if (r) {
1159 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1160 		return 0;
1161 	}
1162 
1163 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1164 	if (r) {
1165 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1166 		return 0;
1167 	}
1168 
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1184 
1185 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1186 	dmub_srv = adev->dm.dmub_srv;
1187 
1188 	if (!dmub_srv) {
1189 		DRM_ERROR("Failed to allocate DMUB service!\n");
1190 		return -ENOMEM;
1191 	}
1192 
1193 	memset(&create_params, 0, sizeof(create_params));
1194 	create_params.user_ctx = adev;
1195 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1196 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1197 	create_params.asic = dmub_asic;
1198 
1199 	/* Create the DMUB service. */
1200 	status = dmub_srv_create(dmub_srv, &create_params);
1201 	if (status != DMUB_STATUS_OK) {
1202 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1203 		return -EINVAL;
1204 	}
1205 
1206 	/* Calculate the size of all the regions for the DMUB service. */
1207 	memset(&region_params, 0, sizeof(region_params));
1208 
1209 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1210 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1211 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1212 	region_params.vbios_size = adev->bios_size;
1213 	region_params.fw_bss_data =
1214 		adev->dm.dmub_fw->data +
1215 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1216 		le32_to_cpu(hdr->inst_const_bytes);
1217 
1218 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1219 					   &region_info);
1220 
1221 	if (status != DMUB_STATUS_OK) {
1222 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1223 		return -EINVAL;
1224 	}
1225 
1226 	/*
1227 	 * Allocate a framebuffer based on the total size of all the regions.
1228 	 * TODO: Move this into GART.
1229 	 */
1230 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1231 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1232 				    &adev->dm.dmub_bo_gpu_addr,
1233 				    &adev->dm.dmub_bo_cpu_addr);
1234 	if (r)
1235 		return r;
1236 
1237 	/* Rebase the regions on the framebuffer address. */
1238 	memset(&fb_params, 0, sizeof(fb_params));
1239 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1240 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1241 	fb_params.region_info = &region_info;
1242 
1243 	adev->dm.dmub_fb_info =
1244 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1245 	fb_info = adev->dm.dmub_fb_info;
1246 
1247 	if (!fb_info) {
1248 		DRM_ERROR(
1249 			"Failed to allocate framebuffer info for DMUB service!\n");
1250 		return -ENOMEM;
1251 	}
1252 
1253 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1254 	if (status != DMUB_STATUS_OK) {
1255 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1256 		return -EINVAL;
1257 	}
1258 
1259 	return 0;
1260 }
1261 
1262 static int dm_sw_init(void *handle)
1263 {
1264 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1265 	int r;
1266 
1267 	r = dm_dmub_sw_init(adev);
1268 	if (r)
1269 		return r;
1270 
1271 	return load_dmcu_fw(adev);
1272 }
1273 
1274 static int dm_sw_fini(void *handle)
1275 {
1276 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1277 
1278 	kfree(adev->dm.dmub_fb_info);
1279 	adev->dm.dmub_fb_info = NULL;
1280 
1281 	if (adev->dm.dmub_srv) {
1282 		dmub_srv_destroy(adev->dm.dmub_srv);
1283 		adev->dm.dmub_srv = NULL;
1284 	}
1285 
1286 	if (adev->dm.dmub_fw) {
1287 		release_firmware(adev->dm.dmub_fw);
1288 		adev->dm.dmub_fw = NULL;
1289 	}
1290 
	if (adev->dm.fw_dmcu) {
1292 		release_firmware(adev->dm.fw_dmcu);
1293 		adev->dm.fw_dmcu = NULL;
1294 	}
1295 
1296 	return 0;
1297 }
1298 
1299 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1300 {
1301 	struct amdgpu_dm_connector *aconnector;
1302 	struct drm_connector *connector;
1303 	struct drm_connector_list_iter iter;
1304 	int ret = 0;
1305 
1306 	drm_connector_list_iter_begin(dev, &iter);
1307 	drm_for_each_connector_iter(connector, &iter) {
1308 		aconnector = to_amdgpu_dm_connector(connector);
1309 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1310 		    aconnector->mst_mgr.aux) {
1311 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1312 					 aconnector,
1313 					 aconnector->base.base.id);
1314 
1315 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1316 			if (ret < 0) {
1317 				DRM_ERROR("DM_MST: Failed to start MST\n");
1318 				aconnector->dc_link->type =
1319 					dc_connection_single;
1320 				break;
1321 			}
1322 		}
1323 	}
1324 	drm_connector_list_iter_end(&iter);
1325 
1326 	return ret;
1327 }
1328 
1329 static int dm_late_init(void *handle)
1330 {
1331 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1332 
1333 	struct dmcu_iram_parameters params;
1334 	unsigned int linear_lut[16];
1335 	int i;
1336 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1337 	bool ret = false;
1338 
1339 	for (i = 0; i < 16; i++)
1340 		linear_lut[i] = 0xFFFF * i / 15;
1341 
1342 	params.set = 0;
1343 	params.backlight_ramping_start = 0xCCCC;
1344 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1345 	params.backlight_lut_array_size = 16;
1346 	params.backlight_lut_array = linear_lut;
1347 
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF * 0.01 = 0x28F
	 */
1351 	params.min_abm_backlight = 0x28F;
1352 
1353 	/* todo will enable for navi10 */
1354 	if (adev->asic_type <= CHIP_RAVEN) {
1355 		ret = dmcu_load_iram(dmcu, params);
1356 
1357 		if (!ret)
1358 			return -EINVAL;
1359 	}
1360 
1361 	return detect_mst_link_for_all_connectors(adev->ddev);
1362 }
1363 
1364 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1365 {
1366 	struct amdgpu_dm_connector *aconnector;
1367 	struct drm_connector *connector;
1368 	struct drm_connector_list_iter iter;
1369 	struct drm_dp_mst_topology_mgr *mgr;
1370 	int ret;
1371 	bool need_hotplug = false;
1372 
1373 	drm_connector_list_iter_begin(dev, &iter);
1374 	drm_for_each_connector_iter(connector, &iter) {
1375 		aconnector = to_amdgpu_dm_connector(connector);
1376 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1377 		    aconnector->mst_port)
1378 			continue;
1379 
1380 		mgr = &aconnector->mst_mgr;
1381 
1382 		if (suspend) {
1383 			drm_dp_mst_topology_mgr_suspend(mgr);
1384 		} else {
1385 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1386 			if (ret < 0) {
1387 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1388 				need_hotplug = true;
1389 			}
1390 		}
1391 	}
1392 	drm_connector_list_iter_end(&iter);
1393 
1394 	if (need_hotplug)
1395 		drm_kms_helper_hotplug_event(dev);
1396 }
1397 
1398 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1399 {
1400 	struct smu_context *smu = &adev->smu;
1401 	int ret = 0;
1402 
1403 	if (!is_support_sw_smu(adev))
1404 		return 0;
1405 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed: the
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1437 	case CHIP_NAVI10:
1438 	case CHIP_NAVI14:
1439 	case CHIP_NAVI12:
1440 		break;
1441 	default:
1442 		return 0;
1443 	}
1444 
1445 	mutex_lock(&smu->mutex);
1446 
1447 	/* pass data to smu controller */
1448 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1449 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1450 		ret = smu_write_watermarks_table(smu);
1451 
1452 		if (ret) {
1453 			mutex_unlock(&smu->mutex);
1454 			DRM_ERROR("Failed to update WMTABLE!\n");
1455 			return ret;
1456 		}
1457 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1458 	}
1459 
1460 	mutex_unlock(&smu->mutex);
1461 
1462 	return 0;
1463 }
1464 
1465 /**
1466  * dm_hw_init() - Initialize DC device
1467  * @handle: The base driver device containing the amdgpu_dm device.
1468  *
1469  * Initialize the &struct amdgpu_display_manager device. This involves calling
1470  * the initializers of each DM component, then populating the struct with them.
1471  *
1472  * Although the function implies hardware initialization, both hardware and
1473  * software are initialized here. Splitting them out to their relevant init
1474  * hooks is a future TODO item.
1475  *
1476  * Some notable things that are initialized here:
1477  *
1478  * - Display Core, both software and hardware
1479  * - DC modules that we need (freesync and color management)
1480  * - DRM software states
1481  * - Interrupt sources and handlers
1482  * - Vblank support
1483  * - Debug FS entries, if enabled
1484  */
1485 static int dm_hw_init(void *handle)
1486 {
1487 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1488 	/* Create DAL display manager */
1489 	amdgpu_dm_init(adev);
1490 	amdgpu_dm_hpd_init(adev);
1491 
1492 	return 0;
1493 }
1494 
1495 /**
1496  * dm_hw_fini() - Teardown DC device
1497  * @handle: The base driver device containing the amdgpu_dm device.
1498  *
1499  * Teardown components within &struct amdgpu_display_manager that require
1500  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1501  * were loaded. Also flush IRQ workqueues and disable them.
1502  */
1503 static int dm_hw_fini(void *handle)
1504 {
1505 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1506 
1507 	amdgpu_dm_hpd_fini(adev);
1508 
1509 	amdgpu_dm_irq_fini(adev);
1510 	amdgpu_dm_fini(adev);
1511 	return 0;
1512 }
1513 
1514 static int dm_suspend(void *handle)
1515 {
1516 	struct amdgpu_device *adev = handle;
1517 	struct amdgpu_display_manager *dm = &adev->dm;
1518 	int ret = 0;
1519 
1520 	WARN_ON(adev->dm.cached_state);
1521 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1522 
1523 	s3_handle_mst(adev->ddev, true);
1524 
1525 	amdgpu_dm_irq_suspend(adev);
1526 
1528 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1529 
1530 	return ret;
1531 }
1532 
1533 static struct amdgpu_dm_connector *
1534 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1535 					     struct drm_crtc *crtc)
1536 {
1537 	uint32_t i;
1538 	struct drm_connector_state *new_con_state;
1539 	struct drm_connector *connector;
1540 	struct drm_crtc *crtc_from_state;
1541 
1542 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1543 		crtc_from_state = new_con_state->crtc;
1544 
1545 		if (crtc_from_state == crtc)
1546 			return to_amdgpu_dm_connector(connector);
1547 	}
1548 
1549 	return NULL;
1550 }
1551 
1552 static void emulated_link_detect(struct dc_link *link)
1553 {
1554 	struct dc_sink_init_data sink_init_data = { 0 };
1555 	struct display_sink_capability sink_caps = { 0 };
1556 	enum dc_edid_status edid_status;
1557 	struct dc_context *dc_ctx = link->ctx;
1558 	struct dc_sink *sink = NULL;
1559 	struct dc_sink *prev_sink = NULL;
1560 
1561 	link->type = dc_connection_none;
1562 	prev_sink = link->local_sink;
1563 
1564 	if (prev_sink != NULL)
1565 		dc_sink_retain(prev_sink);
1566 
1567 	switch (link->connector_signal) {
1568 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1569 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1570 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1571 		break;
1572 	}
1573 
1574 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1575 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1576 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1577 		break;
1578 	}
1579 
1580 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1581 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1582 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1583 		break;
1584 	}
1585 
1586 	case SIGNAL_TYPE_LVDS: {
1587 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1588 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1589 		break;
1590 	}
1591 
1592 	case SIGNAL_TYPE_EDP: {
1593 		sink_caps.transaction_type =
1594 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1595 		sink_caps.signal = SIGNAL_TYPE_EDP;
1596 		break;
1597 	}
1598 
1599 	case SIGNAL_TYPE_DISPLAY_PORT: {
1600 		sink_caps.transaction_type =
1601 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1602 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1603 		break;
1604 	}
1605 
1606 	default:
1607 		DC_ERROR("Invalid connector type! signal:%d\n",
1608 			link->connector_signal);
1609 		return;
1610 	}
1611 
1612 	sink_init_data.link = link;
1613 	sink_init_data.sink_signal = sink_caps.signal;
1614 
1615 	sink = dc_sink_create(&sink_init_data);
1616 	if (!sink) {
1617 		DC_ERROR("Failed to create sink!\n");
1618 		return;
1619 	}
1620 
1621 	/* dc_sink_create returns a new reference */
1622 	link->local_sink = sink;
1623 
1624 	edid_status = dm_helpers_read_local_edid(
1625 			link->ctx,
1626 			link,
1627 			sink);
1628 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
1633 
1634 static int dm_resume(void *handle)
1635 {
1636 	struct amdgpu_device *adev = handle;
1637 	struct drm_device *ddev = adev->ddev;
1638 	struct amdgpu_display_manager *dm = &adev->dm;
1639 	struct amdgpu_dm_connector *aconnector;
1640 	struct drm_connector *connector;
1641 	struct drm_connector_list_iter iter;
1642 	struct drm_crtc *crtc;
1643 	struct drm_crtc_state *new_crtc_state;
1644 	struct dm_crtc_state *dm_new_crtc_state;
1645 	struct drm_plane *plane;
1646 	struct drm_plane_state *new_plane_state;
1647 	struct dm_plane_state *dm_new_plane_state;
1648 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1649 	enum dc_connection_type new_connection_type = dc_connection_none;
1650 	int i, r;
1651 
1652 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1653 	dc_release_state(dm_state->context);
1654 	dm_state->context = dc_create_state(dm->dc);
1655 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1656 	dc_resource_state_construct(dm->dc, dm_state->context);
1657 
1658 	/* Before powering on DC we need to re-initialize DMUB. */
1659 	r = dm_dmub_hw_init(adev);
1660 	if (r)
1661 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1662 
1663 	/* power on hardware */
1664 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1665 
1666 	/* program HPD filter */
1667 	dc_resume(dm->dc);
1668 
	/*
	 * Early enable HPD Rx IRQ; this should be done before setting the
	 * mode, as short-pulse interrupts are used for MST.
	 */
1673 	amdgpu_dm_irq_resume_early(adev);
1674 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
1676 	s3_handle_mst(ddev, false);
1677 
	/* Do detection */
1679 	drm_connector_list_iter_begin(ddev, &iter);
1680 	drm_for_each_connector_iter(connector, &iter) {
1681 		aconnector = to_amdgpu_dm_connector(connector);
1682 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
1687 		if (aconnector->mst_port)
1688 			continue;
1689 
1690 		mutex_lock(&aconnector->hpd_lock);
1691 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1692 			DRM_ERROR("KMS: Failed to detect connector\n");
1693 
1694 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1695 			emulated_link_detect(aconnector->dc_link);
1696 		else
1697 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1698 
1699 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1700 			aconnector->fake_enable = false;
1701 
1702 		if (aconnector->dc_sink)
1703 			dc_sink_release(aconnector->dc_sink);
1704 		aconnector->dc_sink = NULL;
1705 		amdgpu_dm_update_connector_after_detect(aconnector);
1706 		mutex_unlock(&aconnector->hpd_lock);
1707 	}
1708 	drm_connector_list_iter_end(&iter);
1709 
1710 	/* Force mode set in atomic commit */
1711 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1712 		new_crtc_state->active_changed = true;
1713 
1714 	/*
1715 	 * atomic_check is expected to create the dc states. We need to release
1716 	 * them here, since they were duplicated as part of the suspend
1717 	 * procedure.
1718 	 */
1719 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1720 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1721 		if (dm_new_crtc_state->stream) {
1722 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1723 			dc_stream_release(dm_new_crtc_state->stream);
1724 			dm_new_crtc_state->stream = NULL;
1725 		}
1726 	}
1727 
1728 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1729 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1730 		if (dm_new_plane_state->dc_state) {
1731 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1732 			dc_plane_state_release(dm_new_plane_state->dc_state);
1733 			dm_new_plane_state->dc_state = NULL;
1734 		}
1735 	}
1736 
1737 	drm_atomic_helper_resume(ddev, dm->cached_state);
1738 
1739 	dm->cached_state = NULL;
1740 
1741 	amdgpu_dm_irq_resume_late(adev);
1742 
1743 	amdgpu_dm_smu_write_watermarks_table(adev);
1744 
1745 	return 0;
1746 }
1747 
1748 /**
1749  * DOC: DM Lifecycle
1750  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1752  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1753  * the base driver's device list to be initialized and torn down accordingly.
1754  *
1755  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1756  */
1757 
1758 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1759 	.name = "dm",
1760 	.early_init = dm_early_init,
1761 	.late_init = dm_late_init,
1762 	.sw_init = dm_sw_init,
1763 	.sw_fini = dm_sw_fini,
1764 	.hw_init = dm_hw_init,
1765 	.hw_fini = dm_hw_fini,
1766 	.suspend = dm_suspend,
1767 	.resume = dm_resume,
1768 	.is_idle = dm_is_idle,
1769 	.wait_for_idle = dm_wait_for_idle,
1770 	.check_soft_reset = dm_check_soft_reset,
1771 	.soft_reset = dm_soft_reset,
1772 	.set_clockgating_state = dm_set_clockgating_state,
1773 	.set_powergating_state = dm_set_powergating_state,
1774 };
1775 
1776 const struct amdgpu_ip_block_version dm_ip_block =
1777 {
1778 	.type = AMD_IP_BLOCK_TYPE_DCE,
1779 	.major = 1,
1780 	.minor = 0,
1781 	.rev = 0,
1782 	.funcs = &amdgpu_dm_funcs,
1783 };
1784 
1785 
1786 /**
1787  * DOC: atomic
1788  *
1789  * *WIP*
1790  */
1791 
1792 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1793 	.fb_create = amdgpu_display_user_framebuffer_create,
1794 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1795 	.atomic_check = amdgpu_dm_atomic_check,
1796 	.atomic_commit = amdgpu_dm_atomic_commit,
1797 };
1798 
static const struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1800 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1801 };
1802 
1803 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1804 {
1805 	u32 max_cll, min_cll, max, min, q, r;
1806 	struct amdgpu_dm_backlight_caps *caps;
1807 	struct amdgpu_display_manager *dm;
1808 	struct drm_connector *conn_base;
1809 	struct amdgpu_device *adev;
1810 	static const u8 pre_computed_values[] = {
1811 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1812 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1813 
1814 	if (!aconnector || !aconnector->dc_link)
1815 		return;
1816 
1817 	conn_base = &aconnector->base;
1818 	adev = conn_base->dev->dev_private;
1819 	dm = &adev->dm;
1820 	caps = &dm->backlight_caps;
1821 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1822 	caps->aux_support = false;
1823 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1824 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1825 
1826 	if (caps->ext_caps->bits.oled == 1 ||
1827 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1828 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1829 		caps->aux_support = true;
1830 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision. To avoid that, we use the fact that CV is divided by a
	 * constant: by Euclid's division algorithm, CV can be written as
	 * CV = 32*q + r. Substituting this into the luminance expression
	 * gives 50*(2**q)*(2**(r/32)), so only the values of 50*2**(r/32)
	 * for r in 0..31 need to be pre-computed. They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and are stored in pre_computed_values above.
	 */
1846 	q = max_cll >> 5;
1847 	r = max_cll % 32;
1848 	max = (1 << q) * pre_computed_values[r];
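	/*
	 * For example, max_cll = 100 gives q = 3, r = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440 nits,
	 * close to the exact value 50*2**(100/32) ~= 436 nits.
	 */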
1849 
	/*
	 * min luminance: maxLum * (CV/255)^2 / 100, computed in integer
	 * math to avoid floating point.
	 */
	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
1853 
1854 	caps->aux_max_input_signal = max;
1855 	caps->aux_min_input_signal = min;
1856 }
1857 
1858 void amdgpu_dm_update_connector_after_detect(
1859 		struct amdgpu_dm_connector *aconnector)
1860 {
1861 	struct drm_connector *connector = &aconnector->base;
1862 	struct drm_device *dev = connector->dev;
1863 	struct dc_sink *sink;
1864 
	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

1870 	sink = aconnector->dc_link->local_sink;
1871 	if (sink)
1872 		dc_sink_retain(sink);
1873 
1874 	/*
1875 	 * Edid mgmt connector gets first update only in mode_valid hook and then
1876 	 * the connector sink is set to either fake or physical sink depends on link status.
1877 	 * Skip if already done during boot.
1878 	 */
1879 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1880 			&& aconnector->dc_em_sink) {
1881 
1882 		/*
1883 		 * For S3 resume with headless use eml_sink to fake stream
1884 		 * because on resume connector->sink is set to NULL
1885 		 */
1886 		mutex_lock(&dev->mode_config.mutex);
1887 
1888 		if (sink) {
1889 			if (aconnector->dc_sink) {
1890 				amdgpu_dm_update_freesync_caps(connector, NULL);
1891 				/*
1892 				 * retain and release below are used to
1893 				 * bump up refcount for sink because the link doesn't point
1894 				 * to it anymore after disconnect, so on next crtc to connector
1895 				 * reshuffle by UMD we will get into unwanted dc_sink release
1896 				 */
1897 				dc_sink_release(aconnector->dc_sink);
1898 			}
1899 			aconnector->dc_sink = sink;
1900 			dc_sink_retain(aconnector->dc_sink);
1901 			amdgpu_dm_update_freesync_caps(connector,
1902 					aconnector->edid);
1903 		} else {
1904 			amdgpu_dm_update_freesync_caps(connector, NULL);
1905 			if (!aconnector->dc_sink) {
1906 				aconnector->dc_sink = aconnector->dc_em_sink;
1907 				dc_sink_retain(aconnector->dc_sink);
1908 			}
1909 		}
1910 
1911 		mutex_unlock(&dev->mode_config.mutex);
1912 
1913 		if (sink)
1914 			dc_sink_release(sink);
1915 		return;
1916 	}
1917 
1918 	/*
1919 	 * TODO: temporary guard to look for proper fix
1920 	 * if this sink is MST sink, we should not do anything
1921 	 */
1922 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1923 		dc_sink_release(sink);
1924 		return;
1925 	}
1926 
1927 	if (aconnector->dc_sink == sink) {
1928 		/*
1929 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1930 		 * Do nothing!!
1931 		 */
1932 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1933 				aconnector->connector_id);
1934 		if (sink)
1935 			dc_sink_release(sink);
1936 		return;
1937 	}
1938 
1939 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1940 		aconnector->connector_id, aconnector->dc_sink, sink);
1941 
1942 	mutex_lock(&dev->mode_config.mutex);
1943 
1944 	/*
1945 	 * 1. Update status of the drm connector
1946 	 * 2. Send an event and let userspace tell us what to do
1947 	 */
1948 	if (sink) {
1949 		/*
1950 		 * TODO: check if we still need the S3 mode update workaround.
1951 		 * If yes, put it here.
1952 		 */
1953 		if (aconnector->dc_sink)
1954 			amdgpu_dm_update_freesync_caps(connector, NULL);
1955 
1956 		aconnector->dc_sink = sink;
1957 		dc_sink_retain(aconnector->dc_sink);
1958 		if (sink->dc_edid.length == 0) {
1959 			aconnector->edid = NULL;
1960 			if (aconnector->dc_link->aux_mode) {
1961 				drm_dp_cec_unset_edid(
1962 					&aconnector->dm_dp_aux.aux);
1963 			}
1964 		} else {
1965 			aconnector->edid =
1966 				(struct edid *)sink->dc_edid.raw_edid;
1967 
1968 			drm_connector_update_edid_property(connector,
1969 							   aconnector->edid);
1970 
1971 			if (aconnector->dc_link->aux_mode)
1972 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1973 						    aconnector->edid);
1974 		}
1975 
1976 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1977 		update_connector_ext_caps(aconnector);
1978 	} else {
1979 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1980 		amdgpu_dm_update_freesync_caps(connector, NULL);
1981 		drm_connector_update_edid_property(connector, NULL);
1982 		aconnector->num_modes = 0;
1983 		dc_sink_release(aconnector->dc_sink);
1984 		aconnector->dc_sink = NULL;
1985 		aconnector->edid = NULL;
1986 #ifdef CONFIG_DRM_AMD_DC_HDCP
1987 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1988 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1989 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1990 #endif
1991 	}
1992 
1993 	mutex_unlock(&dev->mode_config.mutex);
1994 
1995 	if (sink)
1996 		dc_sink_release(sink);
1997 }
1998 
1999 static void handle_hpd_irq(void *param)
2000 {
2001 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2002 	struct drm_connector *connector = &aconnector->base;
2003 	struct drm_device *dev = connector->dev;
2004 	enum dc_connection_type new_connection_type = dc_connection_none;
2005 #ifdef CONFIG_DRM_AMD_DC_HDCP
2006 	struct amdgpu_device *adev = dev->dev_private;
2007 #endif
2008 
2009 	/*
2010 	 * In case of failure or MST no need to update connector status or notify the OS
2011 	 * since (for MST case) MST does this in its own context.
2012 	 */
2013 	mutex_lock(&aconnector->hpd_lock);
2014 
2015 #ifdef CONFIG_DRM_AMD_DC_HDCP
2016 	if (adev->dm.hdcp_workqueue)
2017 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2018 #endif
2019 	if (aconnector->fake_enable)
2020 		aconnector->fake_enable = false;
2021 
2022 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2023 		DRM_ERROR("KMS: Failed to detect connector\n");
2024 
2025 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

2029 		drm_modeset_lock_all(dev);
2030 		dm_restore_drm_connector_state(dev, connector);
2031 		drm_modeset_unlock_all(dev);
2032 
2033 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2034 			drm_kms_helper_hotplug_event(dev);
2035 
2036 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

2040 		drm_modeset_lock_all(dev);
2041 		dm_restore_drm_connector_state(dev, connector);
2042 		drm_modeset_unlock_all(dev);
2043 
2044 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2045 			drm_kms_helper_hotplug_event(dev);
2046 	}
	mutex_unlock(&aconnector->hpd_lock);
}
2050 
2051 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2052 {
2053 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2054 	uint8_t dret;
2055 	bool new_irq_handled = false;
2056 	int dpcd_addr;
2057 	int dpcd_bytes_to_read;
2058 
2059 	const int max_process_count = 30;
2060 	int process_count = 0;
2061 
2062 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2063 
2064 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2065 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2066 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2067 		dpcd_addr = DP_SINK_COUNT;
2068 	} else {
2069 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2070 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2071 		dpcd_addr = DP_SINK_COUNT_ESI;
2072 	}
2073 
2074 	dret = drm_dp_dpcd_read(
2075 		&aconnector->dm_dp_aux.aux,
2076 		dpcd_addr,
2077 		esi,
2078 		dpcd_bytes_to_read);
2079 
2080 	while (dret == dpcd_bytes_to_read &&
2081 		process_count < max_process_count) {
2082 		uint8_t retry;
2083 		dret = 0;
2084 
2085 		process_count++;
2086 
2087 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2088 		/* handle HPD short pulse irq */
2089 		if (aconnector->mst_mgr.mst_state)
2090 			drm_dp_mst_hpd_irq(
2091 				&aconnector->mst_mgr,
2092 				esi,
2093 				&new_irq_handled);
2094 
2095 		if (new_irq_handled) {
			/*
			 * ACK at DPCD to notify the downstream device. The
			 * write starts at dpcd_addr + 1 and is one byte
			 * shorter than the read: the first byte holds the
			 * sink count, not an interrupt status.
			 */
2097 			const int ack_dpcd_bytes_to_write =
2098 				dpcd_bytes_to_read - 1;
2099 
2100 			for (retry = 0; retry < 3; retry++) {
2101 				uint8_t wret;
2102 
2103 				wret = drm_dp_dpcd_write(
2104 					&aconnector->dm_dp_aux.aux,
2105 					dpcd_addr + 1,
2106 					&esi[1],
2107 					ack_dpcd_bytes_to_write);
2108 				if (wret == ack_dpcd_bytes_to_write)
2109 					break;
2110 			}
2111 
2112 			/* check if there is new irq to be handled */
2113 			dret = drm_dp_dpcd_read(
2114 				&aconnector->dm_dp_aux.aux,
2115 				dpcd_addr,
2116 				esi,
2117 				dpcd_bytes_to_read);
2118 
2119 			new_irq_handled = false;
2120 		} else {
2121 			break;
2122 		}
2123 	}
2124 
2125 	if (process_count == max_process_count)
2126 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2127 }
2128 
2129 static void handle_hpd_rx_irq(void *param)
2130 {
2131 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2132 	struct drm_connector *connector = &aconnector->base;
2133 	struct drm_device *dev = connector->dev;
2134 	struct dc_link *dc_link = aconnector->dc_link;
2135 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2136 	enum dc_connection_type new_connection_type = dc_connection_none;
2137 #ifdef CONFIG_DRM_AMD_DC_HDCP
2138 	union hpd_irq_data hpd_irq_data;
2139 	struct amdgpu_device *adev = dev->dev_private;
2140 
2141 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2142 #endif
2143 
2144 	/*
2145 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2146 	 * conflict, after implement i2c helper, this mutex should be
2147 	 * retired.
2148 	 */
2149 	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

2153 #ifdef CONFIG_DRM_AMD_DC_HDCP
2154 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2155 #else
2156 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2157 #endif
2158 			!is_mst_root_connector) {
2159 		/* Downstream Port status changed. */
2160 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2161 			DRM_ERROR("KMS: Failed to detect connector\n");
2162 
2163 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2164 			emulated_link_detect(dc_link);
2165 
2166 			if (aconnector->fake_enable)
2167 				aconnector->fake_enable = false;
2168 
			amdgpu_dm_update_connector_after_detect(aconnector);

2172 			drm_modeset_lock_all(dev);
2173 			dm_restore_drm_connector_state(dev, connector);
2174 			drm_modeset_unlock_all(dev);
2175 
2176 			drm_kms_helper_hotplug_event(dev);
2177 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2178 
2179 			if (aconnector->fake_enable)
2180 				aconnector->fake_enable = false;
2181 
			amdgpu_dm_update_connector_after_detect(aconnector);

2185 			drm_modeset_lock_all(dev);
2186 			dm_restore_drm_connector_state(dev, connector);
2187 			drm_modeset_unlock_all(dev);
2188 
2189 			drm_kms_helper_hotplug_event(dev);
2190 		}
2191 	}
2192 #ifdef CONFIG_DRM_AMD_DC_HDCP
2193 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2194 		if (adev->dm.hdcp_workqueue)
2195 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2196 	}
2197 #endif
2198 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2199 	    (dc_link->type == dc_connection_mst_branch))
2200 		dm_handle_hpd_rx_irq(aconnector);
2201 
2202 	if (dc_link->type != dc_connection_mst_branch) {
2203 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2204 		mutex_unlock(&aconnector->hpd_lock);
2205 	}
2206 }
2207 
2208 static void register_hpd_handlers(struct amdgpu_device *adev)
2209 {
2210 	struct drm_device *dev = adev->ddev;
2211 	struct drm_connector *connector;
2212 	struct amdgpu_dm_connector *aconnector;
2213 	const struct dc_link *dc_link;
2214 	struct dc_interrupt_params int_params = {0};
2215 
2216 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2217 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2218 
2219 	list_for_each_entry(connector,
2220 			&dev->mode_config.connector_list, head)	{
2221 
2222 		aconnector = to_amdgpu_dm_connector(connector);
2223 		dc_link = aconnector->dc_link;
2224 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2226 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2227 			int_params.irq_source = dc_link->irq_source_hpd;
2228 
2229 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2230 					handle_hpd_irq,
2231 					(void *) aconnector);
2232 		}
2233 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
2235 
2236 			/* Also register for DP short pulse (hpd_rx). */
2237 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2239 
2240 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2241 					handle_hpd_rx_irq,
2242 					(void *) aconnector);
2243 		}
2244 	}
2245 }
2246 
2247 /* Register IRQ sources and initialize IRQ callbacks */
2248 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2249 {
2250 	struct dc *dc = adev->dm.dc;
2251 	struct common_irq_params *c_irq_params;
2252 	struct dc_interrupt_params int_params = {0};
2253 	int r;
2254 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2256 
2257 	if (adev->asic_type >= CHIP_VEGA10)
2258 		client_id = SOC15_IH_CLIENTID_DCE;
2259 
2260 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2261 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2262 
2263 	/*
2264 	 * Actions of amdgpu_irq_add_id():
2265 	 * 1. Register a set() function with base driver.
2266 	 *    Base driver will call set() function to enable/disable an
2267 	 *    interrupt in DC hardware.
2268 	 * 2. Register amdgpu_dm_irq_handler().
2269 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2270 	 *    coming from DC hardware.
2271 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2273 
2274 	/* Use VBLANK interrupt */
2275 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2276 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2277 		if (r) {
2278 			DRM_ERROR("Failed to add crtc irq id!\n");
2279 			return r;
2280 		}
2281 
2282 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2283 		int_params.irq_source =
2284 			dc_interrupt_to_irq_source(dc, i, 0);
2285 
2286 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2287 
2288 		c_irq_params->adev = adev;
2289 		c_irq_params->irq_src = int_params.irq_source;
2290 
2291 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 				dm_crtc_high_irq, c_irq_params);
2293 	}
2294 
2295 	/* Use VUPDATE interrupt */
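	/*
	 * In the vislands30 IV table the per-display V_UPDATE and GRPH_PFLIP
	 * source IDs are interleaved, so the IDs for consecutive displays are
	 * two apart; hence the stride of 2 in this loop and the pflip loop.
	 */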
2296 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2297 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2298 		if (r) {
2299 			DRM_ERROR("Failed to add vupdate irq id!\n");
2300 			return r;
2301 		}
2302 
2303 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2304 		int_params.irq_source =
2305 			dc_interrupt_to_irq_source(dc, i, 0);
2306 
2307 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2308 
2309 		c_irq_params->adev = adev;
2310 		c_irq_params->irq_src = int_params.irq_source;
2311 
2312 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2313 				dm_vupdate_high_irq, c_irq_params);
2314 	}
2315 
2316 	/* Use GRPH_PFLIP interrupt */
2317 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2318 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2319 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2320 		if (r) {
2321 			DRM_ERROR("Failed to add page flip irq id!\n");
2322 			return r;
2323 		}
2324 
2325 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2326 		int_params.irq_source =
2327 			dc_interrupt_to_irq_source(dc, i, 0);
2328 
2329 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2330 
2331 		c_irq_params->adev = adev;
2332 		c_irq_params->irq_src = int_params.irq_source;
2333 
2334 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2338 
2339 	/* HPD */
2340 	r = amdgpu_irq_add_id(adev, client_id,
2341 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2342 	if (r) {
2343 		DRM_ERROR("Failed to add hpd irq id!\n");
2344 		return r;
2345 	}
2346 
2347 	register_hpd_handlers(adev);
2348 
2349 	return 0;
2350 }
2351 
2352 #if defined(CONFIG_DRM_AMD_DC_DCN)
2353 /* Register IRQ sources and initialize IRQ callbacks */
2354 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2355 {
2356 	struct dc *dc = adev->dm.dc;
2357 	struct common_irq_params *c_irq_params;
2358 	struct dc_interrupt_params int_params = {0};
2359 	int r;
2360 	int i;
2361 
2362 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2363 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2364 
2365 	/*
2366 	 * Actions of amdgpu_irq_add_id():
2367 	 * 1. Register a set() function with base driver.
2368 	 *    Base driver will call set() function to enable/disable an
2369 	 *    interrupt in DC hardware.
2370 	 * 2. Register amdgpu_dm_irq_handler().
2371 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2372 	 *    coming from DC hardware.
2373 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2374 	 *    for acknowledging and handling.
2375 	 */
2376 
2377 	/* Use VSTARTUP interrupt */
2378 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2379 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2380 			i++) {
2381 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2382 
2383 		if (r) {
2384 			DRM_ERROR("Failed to add crtc irq id!\n");
2385 			return r;
2386 		}
2387 
2388 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2389 		int_params.irq_source =
2390 			dc_interrupt_to_irq_source(dc, i, 0);
2391 
2392 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2393 
2394 		c_irq_params->adev = adev;
2395 		c_irq_params->irq_src = int_params.irq_source;
2396 
2397 		amdgpu_dm_irq_register_interrupt(
2398 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2399 	}
2400 
2401 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2402 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2403 	 * to trigger at end of each vblank, regardless of state of the lock,
2404 	 * matching DCE behaviour.
2405 	 */
2406 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2407 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2408 	     i++) {
2409 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2410 
2411 		if (r) {
2412 			DRM_ERROR("Failed to add vupdate irq id!\n");
2413 			return r;
2414 		}
2415 
2416 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2417 		int_params.irq_source =
2418 			dc_interrupt_to_irq_source(dc, i, 0);
2419 
2420 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2421 
2422 		c_irq_params->adev = adev;
2423 		c_irq_params->irq_src = int_params.irq_source;
2424 
2425 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2426 				dm_vupdate_high_irq, c_irq_params);
2427 	}
2428 
2429 	/* Use GRPH_PFLIP interrupt */
2430 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2431 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2432 			i++) {
2433 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2434 		if (r) {
2435 			DRM_ERROR("Failed to add page flip irq id!\n");
2436 			return r;
2437 		}
2438 
2439 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 		int_params.irq_source =
2441 			dc_interrupt_to_irq_source(dc, i, 0);
2442 
2443 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2444 
2445 		c_irq_params->adev = adev;
2446 		c_irq_params->irq_src = int_params.irq_source;
2447 
2448 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2452 
2453 	/* HPD */
2454 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2455 			&adev->hpd_irq);
2456 	if (r) {
2457 		DRM_ERROR("Failed to add hpd irq id!\n");
2458 		return r;
2459 	}
2460 
2461 	register_hpd_handlers(adev);
2462 
2463 	return 0;
2464 }
2465 #endif
2466 
2467 /*
2468  * Acquires the lock for the atomic state object and returns
2469  * the new atomic state.
2470  *
2471  * This should only be called during atomic check.
2472  */
2473 static int dm_atomic_get_state(struct drm_atomic_state *state,
2474 			       struct dm_atomic_state **dm_state)
2475 {
2476 	struct drm_device *dev = state->dev;
2477 	struct amdgpu_device *adev = dev->dev_private;
2478 	struct amdgpu_display_manager *dm = &adev->dm;
2479 	struct drm_private_state *priv_state;
2480 
2481 	if (*dm_state)
2482 		return 0;
2483 
2484 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2485 	if (IS_ERR(priv_state))
2486 		return PTR_ERR(priv_state);
2487 
2488 	*dm_state = to_dm_atomic_state(priv_state);
2489 
2490 	return 0;
2491 }
2492 
2493 struct dm_atomic_state *
2494 dm_atomic_get_new_state(struct drm_atomic_state *state)
2495 {
2496 	struct drm_device *dev = state->dev;
2497 	struct amdgpu_device *adev = dev->dev_private;
2498 	struct amdgpu_display_manager *dm = &adev->dm;
2499 	struct drm_private_obj *obj;
2500 	struct drm_private_state *new_obj_state;
2501 	int i;
2502 
2503 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2504 		if (obj->funcs == dm->atomic_obj.funcs)
2505 			return to_dm_atomic_state(new_obj_state);
2506 	}
2507 
2508 	return NULL;
2509 }
2510 
2511 struct dm_atomic_state *
2512 dm_atomic_get_old_state(struct drm_atomic_state *state)
2513 {
2514 	struct drm_device *dev = state->dev;
2515 	struct amdgpu_device *adev = dev->dev_private;
2516 	struct amdgpu_display_manager *dm = &adev->dm;
2517 	struct drm_private_obj *obj;
2518 	struct drm_private_state *old_obj_state;
2519 	int i;
2520 
2521 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2522 		if (obj->funcs == dm->atomic_obj.funcs)
2523 			return to_dm_atomic_state(old_obj_state);
2524 	}
2525 
2526 	return NULL;
2527 }
2528 
2529 static struct drm_private_state *
2530 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2531 {
2532 	struct dm_atomic_state *old_state, *new_state;
2533 
2534 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2535 	if (!new_state)
2536 		return NULL;
2537 
2538 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2539 
2540 	old_state = to_dm_atomic_state(obj->state);
2541 
2542 	if (old_state && old_state->context)
2543 		new_state->context = dc_copy_state(old_state->context);
2544 
2545 	if (!new_state->context) {
2546 		kfree(new_state);
2547 		return NULL;
2548 	}
2549 
2550 	return &new_state->base;
2551 }
2552 
2553 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2554 				    struct drm_private_state *state)
2555 {
2556 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2557 
2558 	if (dm_state && dm_state->context)
2559 		dc_release_state(dm_state->context);
2560 
2561 	kfree(dm_state);
2562 }
2563 
static const struct drm_private_state_funcs dm_atomic_state_funcs = {
2565 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2566 	.atomic_destroy_state = dm_atomic_destroy_state,
2567 };
2568 
2569 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2570 {
2571 	struct dm_atomic_state *state;
2572 	int r;
2573 
2574 	adev->mode_info.mode_config_initialized = true;
2575 
2576 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2577 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2578 
2579 	adev->ddev->mode_config.max_width = 16384;
2580 	adev->ddev->mode_config.max_height = 16384;
2581 
2582 	adev->ddev->mode_config.preferred_depth = 24;
2583 	adev->ddev->mode_config.prefer_shadow = 1;
2584 	/* indicates support for immediate flip */
2585 	adev->ddev->mode_config.async_page_flip = true;
2586 
2587 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2588 
2589 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2590 	if (!state)
2591 		return -ENOMEM;
2592 
2593 	state->context = dc_create_state(adev->dm.dc);
2594 	if (!state->context) {
2595 		kfree(state);
2596 		return -ENOMEM;
2597 	}
2598 
2599 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2600 
2601 	drm_atomic_private_obj_init(adev->ddev,
2602 				    &adev->dm.atomic_obj,
2603 				    &state->base,
2604 				    &dm_atomic_state_funcs);
2605 
2606 	r = amdgpu_display_modeset_create_props(adev);
2607 	if (r)
2608 		return r;
2609 
2610 	r = amdgpu_dm_audio_init(adev);
2611 	if (r)
2612 		return r;
2613 
2614 	return 0;
2615 }
2616 
2617 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2618 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2619 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2620 
2621 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2622 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2623 
2624 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2625 {
2626 #if defined(CONFIG_ACPI)
2627 	struct amdgpu_dm_backlight_caps caps;
2628 
2629 	if (dm->backlight_caps.caps_valid)
2630 		return;
2631 
2632 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2633 	if (caps.caps_valid) {
2634 		dm->backlight_caps.caps_valid = true;
2635 		if (caps.aux_support)
2636 			return;
2637 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2638 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2639 	} else {
2640 		dm->backlight_caps.min_input_signal =
2641 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2642 		dm->backlight_caps.max_input_signal =
2643 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2644 	}
2645 #else
2646 	if (dm->backlight_caps.aux_support)
2647 		return;
2648 
2649 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2650 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2651 #endif
2652 }
2653 
2654 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2655 {
2656 	bool rc;
2657 
2658 	if (!link)
2659 		return 1;
2660 
2661 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2662 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2663 
2664 	return rc ? 0 : 1;
2665 }
2666 
2667 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2668 			      const uint32_t user_brightness)
2669 {
2670 	u32 min, max, conversion_pace;
2671 	u32 brightness = user_brightness;
2672 
2673 	if (!caps)
2674 		goto out;
2675 
2676 	if (!caps->aux_support) {
2677 		max = caps->max_input_signal;
2678 		min = caps->min_input_signal;
2679 		/*
2680 		 * The brightness input is in the range 0-255
2681 		 * It needs to be rescaled to be between the
2682 		 * requested min and max input signal
2683 		 * It also needs to be scaled up by 0x101 to
2684 		 * match the DC interface which has a range of
2685 		 * 0 to 0xffff
2686 		 */
2687 		conversion_pace = 0x101;
2688 		brightness =
2689 			user_brightness
2690 			* conversion_pace
2691 			* (max - min)
2692 			/ AMDGPU_MAX_BL_LEVEL
2693 			+ min * conversion_pace;
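		/*
		 * With the default caps (min = 12, max = 255) this maps
		 * user_brightness 0 -> 12 * 0x101 = 3084 and
		 * user_brightness 255 -> 255 * 0x101 = 0xffff, i.e. the
		 * full DC range.
		 */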
2694 	} else {
2695 		/* TODO
2696 		 * We are doing a linear interpolation here, which is OK but
2697 		 * does not provide the optimal result. We probably want
2698 		 * something close to the Perceptual Quantizer (PQ) curve.
2699 		 */
2700 		max = caps->aux_max_input_signal;
2701 		min = caps->aux_min_input_signal;
2702 
2703 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2704 			       + user_brightness * max;
		/* Multiply the value by 1000 since we use millinits */
2706 		brightness *= 1000;
2707 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
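		/*
		 * The endpoints land on the panel limits in millinits:
		 * user_brightness 0 -> min * 1000 and
		 * user_brightness 255 -> max * 1000.
		 */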
2708 	}
2709 
2710 out:
2711 	return brightness;
2712 }
2713 
2714 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2715 {
2716 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2717 	struct amdgpu_dm_backlight_caps caps;
2718 	struct dc_link *link = NULL;
2719 	u32 brightness;
2720 	bool rc;
2721 
2722 	amdgpu_dm_update_backlight_caps(dm);
2723 	caps = dm->backlight_caps;
2724 
2725 	link = (struct dc_link *)dm->backlight_link;
2726 
2727 	brightness = convert_brightness(&caps, bd->props.brightness);
2728 	// Change brightness based on AUX property
2729 	if (caps.aux_support)
2730 		return set_backlight_via_aux(link, brightness);
2731 
2732 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2733 
2734 	return rc ? 0 : 1;
2735 }
2736 
2737 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2738 {
2739 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2741 
2742 	if (ret == DC_ERROR_UNEXPECTED)
2743 		return bd->props.brightness;
2744 	return ret;
2745 }
2746 
2747 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2748 	.options = BL_CORE_SUSPENDRESUME,
2749 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2750 	.update_status	= amdgpu_dm_backlight_update_status,
2751 };
2752 
2753 static void
2754 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2755 {
2756 	char bl_name[16];
2757 	struct backlight_properties props = { 0 };
2758 
2759 	amdgpu_dm_update_backlight_caps(dm);
2760 
2761 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2762 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2763 	props.type = BACKLIGHT_RAW;
2764 
2765 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2766 			dm->adev->ddev->primary->index);
2767 
2768 	dm->backlight_dev = backlight_device_register(bl_name,
2769 			dm->adev->ddev->dev,
2770 			dm,
2771 			&amdgpu_dm_backlight_ops,
2772 			&props);
2773 
2774 	if (IS_ERR(dm->backlight_dev))
2775 		DRM_ERROR("DM: Backlight registration failed!\n");
2776 	else
2777 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2778 }
2779 
2780 #endif
2781 
2782 static int initialize_plane(struct amdgpu_display_manager *dm,
2783 			    struct amdgpu_mode_info *mode_info, int plane_id,
2784 			    enum drm_plane_type plane_type,
2785 			    const struct dc_plane_cap *plane_cap)
2786 {
2787 	struct drm_plane *plane;
2788 	unsigned long possible_crtcs;
2789 	int ret = 0;
2790 
2791 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2792 	if (!plane) {
2793 		DRM_ERROR("KMS: Failed to allocate plane\n");
2794 		return -ENOMEM;
2795 	}
2796 	plane->type = plane_type;
2797 
2798 	/*
2799 	 * HACK: IGT tests expect that the primary plane for a CRTC
2800 	 * can only have one possible CRTC. Only expose support for
2801 	 * any CRTC if they're not going to be used as a primary plane
2802 	 * for a CRTC - like overlay or underlay planes.
2803 	 */
2804 	possible_crtcs = 1 << plane_id;
2805 	if (plane_id >= dm->dc->caps.max_streams)
2806 		possible_crtcs = 0xff;
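	/*
	 * For example, plane_id 0 yields possible_crtcs = 0x1, binding the
	 * plane to CRTC 0 only, while overlay planes advertise 0xff.
	 */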
2807 
2808 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2809 
2810 	if (ret) {
2811 		DRM_ERROR("KMS: Failed to initialize plane\n");
2812 		kfree(plane);
2813 		return ret;
2814 	}
2815 
2816 	if (mode_info)
2817 		mode_info->planes[plane_id] = plane;
2818 
2819 	return ret;
2820 }
2821 
2822 
2823 static void register_backlight_device(struct amdgpu_display_manager *dm,
2824 				      struct dc_link *link)
2825 {
2826 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2827 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2828 
2829 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2830 	    link->type != dc_connection_none) {
2831 		/*
2832 		 * Event if registration failed, we should continue with
2833 		 * DM initialization because not having a backlight control
2834 		 * is better then a black screen.
2835 		 */
2836 		amdgpu_dm_register_backlight_device(dm);
2837 
2838 		if (dm->backlight_dev)
2839 			dm->backlight_link = link;
2840 	}
2841 #endif
2842 }
2843 
2844 
2845 /*
2846  * In this architecture, the association
2847  * connector -> encoder -> crtc
2848  * id not really requried. The crtc and connector will hold the
2849  * display_index as an abstraction to use with DAL component
2850  *
2851  * Returns 0 on success
2852  */
2853 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2854 {
2855 	struct amdgpu_display_manager *dm = &adev->dm;
2856 	int32_t i;
2857 	struct amdgpu_dm_connector *aconnector = NULL;
2858 	struct amdgpu_encoder *aencoder = NULL;
2859 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2860 	uint32_t link_cnt;
2861 	int32_t primary_planes;
2862 	enum dc_connection_type new_connection_type = dc_connection_none;
2863 	const struct dc_plane_cap *plane;
2864 
2865 	link_cnt = dm->dc->caps.max_links;
2866 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2867 		DRM_ERROR("DM: Failed to initialize mode config\n");
2868 		return -EINVAL;
2869 	}
2870 
2871 	/* There is one primary plane per CRTC */
2872 	primary_planes = dm->dc->caps.max_streams;
2873 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2874 
2875 	/*
2876 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2877 	 * Order is reversed to match iteration order in atomic check.
2878 	 */
2879 	for (i = (primary_planes - 1); i >= 0; i--) {
2880 		plane = &dm->dc->caps.planes[i];
2881 
2882 		if (initialize_plane(dm, mode_info, i,
2883 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2884 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2885 			goto fail;
2886 		}
2887 	}
2888 
2889 	/*
2890 	 * Initialize overlay planes, index starting after primary planes.
2891 	 * These planes have a higher DRM index than the primary planes since
2892 	 * they should be considered as having a higher z-order.
2893 	 * Order is reversed to match iteration order in atomic check.
2894 	 *
2895 	 * Only support DCN for now, and only expose one so we don't encourage
2896 	 * userspace to use up all the pipes.
2897 	 */
2898 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2899 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2900 
2901 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2902 			continue;
2903 
2904 		if (!plane->blends_with_above || !plane->blends_with_below)
2905 			continue;
2906 
2907 		if (!plane->pixel_format_support.argb8888)
2908 			continue;
2909 
2910 		if (initialize_plane(dm, NULL, primary_planes + i,
2911 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2912 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2913 			goto fail;
2914 		}
2915 
2916 		/* Only create one overlay plane. */
2917 		break;
2918 	}
2919 
2920 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2921 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2922 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2923 			goto fail;
2924 		}
2925 
2926 	dm->display_indexes_num = dm->dc->caps.max_streams;
2927 
2928 	/* loops over all connectors on the board */
2929 	for (i = 0; i < link_cnt; i++) {
2930 		struct dc_link *link = NULL;
2931 
2932 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2933 			DRM_ERROR(
2934 				"KMS: Cannot support more than %d display indexes\n",
2935 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2936 			continue;
2937 		}
2938 
2939 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2940 		if (!aconnector)
2941 			goto fail;
2942 
2943 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2944 		if (!aencoder)
2945 			goto fail;
2946 
2947 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2948 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2949 			goto fail;
2950 		}
2951 
2952 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2953 			DRM_ERROR("KMS: Failed to initialize connector\n");
2954 			goto fail;
2955 		}
2956 
2957 		link = dc_get_link_at_index(dm->dc, i);
2958 
2959 		if (!dc_link_detect_sink(link, &new_connection_type))
2960 			DRM_ERROR("KMS: Failed to detect connector\n");
2961 
2962 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2963 			emulated_link_detect(link);
2964 			amdgpu_dm_update_connector_after_detect(aconnector);
2965 
2966 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2967 			amdgpu_dm_update_connector_after_detect(aconnector);
2968 			register_backlight_device(dm, link);
2969 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2970 				amdgpu_dm_set_psr_caps(link);
2971 		}
2972 
2973 
2974 	}
2975 
2976 	/* Software is initialized. Now we can register interrupt handlers. */
2977 	switch (adev->asic_type) {
2978 	case CHIP_BONAIRE:
2979 	case CHIP_HAWAII:
2980 	case CHIP_KAVERI:
2981 	case CHIP_KABINI:
2982 	case CHIP_MULLINS:
2983 	case CHIP_TONGA:
2984 	case CHIP_FIJI:
2985 	case CHIP_CARRIZO:
2986 	case CHIP_STONEY:
2987 	case CHIP_POLARIS11:
2988 	case CHIP_POLARIS10:
2989 	case CHIP_POLARIS12:
2990 	case CHIP_VEGAM:
2991 	case CHIP_VEGA10:
2992 	case CHIP_VEGA12:
2993 	case CHIP_VEGA20:
2994 		if (dce110_register_irq_handlers(dm->adev)) {
2995 			DRM_ERROR("DM: Failed to initialize IRQ\n");
2996 			goto fail;
2997 		}
2998 		break;
2999 #if defined(CONFIG_DRM_AMD_DC_DCN)
3000 	case CHIP_RAVEN:
3001 	case CHIP_NAVI12:
3002 	case CHIP_NAVI10:
3003 	case CHIP_NAVI14:
3004 	case CHIP_RENOIR:
3005 		if (dcn10_register_irq_handlers(dm->adev)) {
3006 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3007 			goto fail;
3008 		}
3009 		break;
3010 #endif
3011 	default:
3012 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3013 		goto fail;
3014 	}
3015 
3016 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3018 
3019 	/* No userspace support. */
3020 	dm->dc->debug.disable_tri_buf = true;
3021 
3022 	return 0;
3023 fail:
3024 	kfree(aencoder);
3025 	kfree(aconnector);
3026 
3027 	return -EINVAL;
3028 }
3029 
3030 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3031 {
3032 	drm_mode_config_cleanup(dm->ddev);
3033 	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
3036 
3037 /******************************************************************************
3038  * amdgpu_display_funcs functions
3039  *****************************************************************************/
3040 
3041 /*
3042  * dm_bandwidth_update - program display watermarks
3043  *
3044  * @adev: amdgpu_device pointer
3045  *
3046  * Calculate and program the display watermarks and line buffer allocation.
3047  */
3048 static void dm_bandwidth_update(struct amdgpu_device *adev)
3049 {
3050 	/* TODO: implement later */
3051 }
3052 
3053 static const struct amdgpu_display_funcs dm_display_funcs = {
3054 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3055 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3056 	.backlight_set_level = NULL, /* never called for DC */
3057 	.backlight_get_level = NULL, /* never called for DC */
3058 	.hpd_sense = NULL,/* called unconditionally */
3059 	.hpd_set_polarity = NULL, /* called unconditionally */
3060 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3061 	.page_flip_get_scanoutpos =
3062 		dm_crtc_get_scanoutpos,/* called unconditionally */
3063 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3064 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3065 };
3066 
3067 #if defined(CONFIG_DEBUG_KERNEL_DC)
3068 
3069 static ssize_t s3_debug_store(struct device *device,
3070 			      struct device_attribute *attr,
3071 			      const char *buf,
3072 			      size_t count)
3073 {
3074 	int ret;
3075 	int s3_state;
3076 	struct drm_device *drm_dev = dev_get_drvdata(device);
3077 	struct amdgpu_device *adev = drm_dev->dev_private;
3078 
3079 	ret = kstrtoint(buf, 0, &s3_state);
3080 
3081 	if (ret == 0) {
3082 		if (s3_state) {
3083 			dm_resume(adev);
3084 			drm_kms_helper_hotplug_event(adev->ddev);
		} else {
			dm_suspend(adev);
		}
3087 	}
3088 
3089 	return ret == 0 ? count : 0;
3090 }
3091 
3092 DEVICE_ATTR_WO(s3_debug);
3093 
3094 #endif
3095 
3096 static int dm_early_init(void *handle)
3097 {
3098 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3099 
3100 	switch (adev->asic_type) {
3101 	case CHIP_BONAIRE:
3102 	case CHIP_HAWAII:
3103 		adev->mode_info.num_crtc = 6;
3104 		adev->mode_info.num_hpd = 6;
3105 		adev->mode_info.num_dig = 6;
3106 		break;
3107 	case CHIP_KAVERI:
3108 		adev->mode_info.num_crtc = 4;
3109 		adev->mode_info.num_hpd = 6;
3110 		adev->mode_info.num_dig = 7;
3111 		break;
3112 	case CHIP_KABINI:
3113 	case CHIP_MULLINS:
3114 		adev->mode_info.num_crtc = 2;
3115 		adev->mode_info.num_hpd = 6;
3116 		adev->mode_info.num_dig = 6;
3117 		break;
3118 	case CHIP_FIJI:
3119 	case CHIP_TONGA:
3120 		adev->mode_info.num_crtc = 6;
3121 		adev->mode_info.num_hpd = 6;
3122 		adev->mode_info.num_dig = 7;
3123 		break;
3124 	case CHIP_CARRIZO:
3125 		adev->mode_info.num_crtc = 3;
3126 		adev->mode_info.num_hpd = 6;
3127 		adev->mode_info.num_dig = 9;
3128 		break;
3129 	case CHIP_STONEY:
3130 		adev->mode_info.num_crtc = 2;
3131 		adev->mode_info.num_hpd = 6;
3132 		adev->mode_info.num_dig = 9;
3133 		break;
3134 	case CHIP_POLARIS11:
3135 	case CHIP_POLARIS12:
3136 		adev->mode_info.num_crtc = 5;
3137 		adev->mode_info.num_hpd = 5;
3138 		adev->mode_info.num_dig = 5;
3139 		break;
3140 	case CHIP_POLARIS10:
3141 	case CHIP_VEGAM:
3142 		adev->mode_info.num_crtc = 6;
3143 		adev->mode_info.num_hpd = 6;
3144 		adev->mode_info.num_dig = 6;
3145 		break;
3146 	case CHIP_VEGA10:
3147 	case CHIP_VEGA12:
3148 	case CHIP_VEGA20:
3149 		adev->mode_info.num_crtc = 6;
3150 		adev->mode_info.num_hpd = 6;
3151 		adev->mode_info.num_dig = 6;
3152 		break;
3153 #if defined(CONFIG_DRM_AMD_DC_DCN)
3154 	case CHIP_RAVEN:
3155 		adev->mode_info.num_crtc = 4;
3156 		adev->mode_info.num_hpd = 4;
3157 		adev->mode_info.num_dig = 4;
3158 		break;
3159 #endif
3160 	case CHIP_NAVI10:
3161 	case CHIP_NAVI12:
3162 		adev->mode_info.num_crtc = 6;
3163 		adev->mode_info.num_hpd = 6;
3164 		adev->mode_info.num_dig = 6;
3165 		break;
3166 	case CHIP_NAVI14:
3167 		adev->mode_info.num_crtc = 5;
3168 		adev->mode_info.num_hpd = 5;
3169 		adev->mode_info.num_dig = 5;
3170 		break;
3171 	case CHIP_RENOIR:
3172 		adev->mode_info.num_crtc = 4;
3173 		adev->mode_info.num_hpd = 4;
3174 		adev->mode_info.num_dig = 4;
3175 		break;
3176 	default:
3177 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3178 		return -EINVAL;
3179 	}
3180 
3181 	amdgpu_dm_set_irq_funcs(adev);
3182 
3183 	if (adev->mode_info.funcs == NULL)
3184 		adev->mode_info.funcs = &dm_display_funcs;
3185 
3186 	/*
3187 	 * Note: Do NOT change adev->audio_endpt_rreg and
3188 	 * adev->audio_endpt_wreg because they are initialised in
3189 	 * amdgpu_device_init()
3190 	 */
3191 #if defined(CONFIG_DEBUG_KERNEL_DC)
3192 	device_create_file(
3193 		adev->ddev->dev,
3194 		&dev_attr_s3_debug);
3195 #endif
3196 
3197 	return 0;
3198 }
3199 
3200 static bool modeset_required(struct drm_crtc_state *crtc_state,
3201 			     struct dc_stream_state *new_stream,
3202 			     struct dc_stream_state *old_stream)
3203 {
3204 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3205 		return false;
3206 
3207 	if (!crtc_state->enable)
3208 		return false;
3209 
3210 	return crtc_state->active;
3211 }
3212 
3213 static bool modereset_required(struct drm_crtc_state *crtc_state)
3214 {
3215 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3216 		return false;
3217 
3218 	return !crtc_state->enable || !crtc_state->active;
3219 }
3220 
3221 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3222 {
3223 	drm_encoder_cleanup(encoder);
3224 	kfree(encoder);
3225 }
3226 
3227 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3228 	.destroy = amdgpu_dm_encoder_destroy,
3229 };
3230 
3231 
3232 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3233 				struct dc_scaling_info *scaling_info)
3234 {
3235 	int scale_w, scale_h;
3236 
3237 	memset(scaling_info, 0, sizeof(*scaling_info));
3238 
	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
3240 	scaling_info->src_rect.x = state->src_x >> 16;
3241 	scaling_info->src_rect.y = state->src_y >> 16;
3242 
3243 	scaling_info->src_rect.width = state->src_w >> 16;
3244 	if (scaling_info->src_rect.width == 0)
3245 		return -EINVAL;
3246 
3247 	scaling_info->src_rect.height = state->src_h >> 16;
3248 	if (scaling_info->src_rect.height == 0)
3249 		return -EINVAL;
3250 
3251 	scaling_info->dst_rect.x = state->crtc_x;
3252 	scaling_info->dst_rect.y = state->crtc_y;
3253 
3254 	if (state->crtc_w == 0)
3255 		return -EINVAL;
3256 
3257 	scaling_info->dst_rect.width = state->crtc_w;
3258 
3259 	if (state->crtc_h == 0)
3260 		return -EINVAL;
3261 
3262 	scaling_info->dst_rect.height = state->crtc_h;
3263 
3264 	/* DRM doesn't specify clipping on destination output. */
3265 	scaling_info->clip_rect = scaling_info->dst_rect;
3266 
3267 	/* TODO: Validate scaling per-format with DC plane caps */
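	/*
	 * The ratios below are in units of 0.1%, so the allowed range
	 * 250..16000 corresponds to scaling factors of 0.25x to 16x.
	 */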
3268 	scale_w = scaling_info->dst_rect.width * 1000 /
3269 		  scaling_info->src_rect.width;
3270 
3271 	if (scale_w < 250 || scale_w > 16000)
3272 		return -EINVAL;
3273 
3274 	scale_h = scaling_info->dst_rect.height * 1000 /
3275 		  scaling_info->src_rect.height;
3276 
3277 	if (scale_h < 250 || scale_h > 16000)
3278 		return -EINVAL;
3279 
3280 	/*
3281 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3282 	 * assume reasonable defaults based on the format.
3283 	 */
3284 
3285 	return 0;
3286 }
3287 
3288 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3289 		       uint64_t *tiling_flags)
3290 {
3291 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3292 	int r = amdgpu_bo_reserve(rbo, false);
3293 
3294 	if (unlikely(r)) {
3295 		/* Don't show error message when returning -ERESTARTSYS */
3296 		if (r != -ERESTARTSYS)
3297 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3298 		return r;
3299 	}
3300 
3301 	if (tiling_flags)
3302 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3303 
3304 	amdgpu_bo_unreserve(rbo);
3305 
3306 	return r;
3307 }
3308 
3309 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3310 {
3311 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3312 
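	/* DCC_OFFSET_256B is stored in units of 256 bytes. */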
3313 	return offset ? (address + offset * 256) : 0;
3314 }
3315 
3316 static int
3317 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3318 			  const struct amdgpu_framebuffer *afb,
3319 			  const enum surface_pixel_format format,
3320 			  const enum dc_rotation_angle rotation,
3321 			  const struct plane_size *plane_size,
3322 			  const union dc_tiling_info *tiling_info,
3323 			  const uint64_t info,
3324 			  struct dc_plane_dcc_param *dcc,
3325 			  struct dc_plane_address *address,
3326 			  bool force_disable_dcc)
3327 {
3328 	struct dc *dc = adev->dm.dc;
3329 	struct dc_dcc_surface_param input;
3330 	struct dc_surface_dcc_cap output;
3331 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3332 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3333 	uint64_t dcc_address;
3334 
3335 	memset(&input, 0, sizeof(input));
3336 	memset(&output, 0, sizeof(output));
3337 
3338 	if (force_disable_dcc)
3339 		return 0;
3340 
3341 	if (!offset)
3342 		return 0;
3343 
3344 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3345 		return 0;
3346 
3347 	if (!dc->cap_funcs.get_dcc_compression_cap)
3348 		return -EINVAL;
3349 
3350 	input.format = format;
3351 	input.surface_size.width = plane_size->surface_size.width;
3352 	input.surface_size.height = plane_size->surface_size.height;
3353 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3354 
3355 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3356 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3357 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3358 		input.scan = SCAN_DIRECTION_VERTICAL;
3359 
3360 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3361 		return -EINVAL;
3362 
3363 	if (!output.capable)
3364 		return -EINVAL;
3365 
3366 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3367 		return -EINVAL;
3368 
3369 	dcc->enable = 1;
3370 	dcc->meta_pitch =
3371 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3372 	dcc->independent_64b_blks = i64b;
3373 
3374 	dcc_address = get_dcc_address(afb->address, info);
3375 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3376 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3377 
3378 	return 0;
3379 }
3380 
3381 static int
3382 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3383 			     const struct amdgpu_framebuffer *afb,
3384 			     const enum surface_pixel_format format,
3385 			     const enum dc_rotation_angle rotation,
3386 			     const uint64_t tiling_flags,
3387 			     union dc_tiling_info *tiling_info,
3388 			     struct plane_size *plane_size,
3389 			     struct dc_plane_dcc_param *dcc,
3390 			     struct dc_plane_address *address,
3391 			     bool force_disable_dcc)
3392 {
3393 	const struct drm_framebuffer *fb = &afb->base;
3394 	int ret;
3395 
3396 	memset(tiling_info, 0, sizeof(*tiling_info));
3397 	memset(plane_size, 0, sizeof(*plane_size));
3398 	memset(dcc, 0, sizeof(*dcc));
3399 	memset(address, 0, sizeof(*address));
3400 
3401 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3402 		plane_size->surface_size.x = 0;
3403 		plane_size->surface_size.y = 0;
3404 		plane_size->surface_size.width = fb->width;
3405 		plane_size->surface_size.height = fb->height;
3406 		plane_size->surface_pitch =
3407 			fb->pitches[0] / fb->format->cpp[0];
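		/* fb->pitches[] is in bytes; DC expects a pitch in pixels. */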
3408 
3409 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3410 		address->grph.addr.low_part = lower_32_bits(afb->address);
3411 		address->grph.addr.high_part = upper_32_bits(afb->address);
3412 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3413 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3414 
3415 		plane_size->surface_size.x = 0;
3416 		plane_size->surface_size.y = 0;
3417 		plane_size->surface_size.width = fb->width;
3418 		plane_size->surface_size.height = fb->height;
3419 		plane_size->surface_pitch =
3420 			fb->pitches[0] / fb->format->cpp[0];
3421 
3422 		plane_size->chroma_size.x = 0;
3423 		plane_size->chroma_size.y = 0;
3424 		/* TODO: set these based on surface format */
3425 		plane_size->chroma_size.width = fb->width / 2;
3426 		plane_size->chroma_size.height = fb->height / 2;
3427 
3428 		plane_size->chroma_pitch =
3429 			fb->pitches[1] / fb->format->cpp[1];
3430 
3431 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3432 		address->video_progressive.luma_addr.low_part =
3433 			lower_32_bits(afb->address);
3434 		address->video_progressive.luma_addr.high_part =
3435 			upper_32_bits(afb->address);
3436 		address->video_progressive.chroma_addr.low_part =
3437 			lower_32_bits(chroma_addr);
3438 		address->video_progressive.chroma_addr.high_part =
3439 			upper_32_bits(chroma_addr);
3440 	}
3441 
3442 	/* Fill GFX8 params */
3443 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3444 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3445 
3446 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3447 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3448 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3449 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3450 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3451 
3452 		/* XXX fix me for VI */
3453 		tiling_info->gfx8.num_banks = num_banks;
3454 		tiling_info->gfx8.array_mode =
3455 				DC_ARRAY_2D_TILED_THIN1;
3456 		tiling_info->gfx8.tile_split = tile_split;
3457 		tiling_info->gfx8.bank_width = bankw;
3458 		tiling_info->gfx8.bank_height = bankh;
3459 		tiling_info->gfx8.tile_aspect = mtaspect;
3460 		tiling_info->gfx8.tile_mode =
3461 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3462 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3463 			== DC_ARRAY_1D_TILED_THIN1) {
3464 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3465 	}
3466 
3467 	tiling_info->gfx8.pipe_config =
3468 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3469 
3470 	if (adev->asic_type == CHIP_VEGA10 ||
3471 	    adev->asic_type == CHIP_VEGA12 ||
3472 	    adev->asic_type == CHIP_VEGA20 ||
3473 	    adev->asic_type == CHIP_NAVI10 ||
3474 	    adev->asic_type == CHIP_NAVI14 ||
3475 	    adev->asic_type == CHIP_NAVI12 ||
3476 	    adev->asic_type == CHIP_RENOIR ||
3477 	    adev->asic_type == CHIP_RAVEN) {
3478 		/* Fill GFX9 params */
3479 		tiling_info->gfx9.num_pipes =
3480 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3481 		tiling_info->gfx9.num_banks =
3482 			adev->gfx.config.gb_addr_config_fields.num_banks;
3483 		tiling_info->gfx9.pipe_interleave =
3484 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3485 		tiling_info->gfx9.num_shader_engines =
3486 			adev->gfx.config.gb_addr_config_fields.num_se;
3487 		tiling_info->gfx9.max_compressed_frags =
3488 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3489 		tiling_info->gfx9.num_rb_per_se =
3490 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3491 		tiling_info->gfx9.swizzle =
3492 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3493 		tiling_info->gfx9.shaderEnable = 1;
3494 
3495 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3496 						plane_size, tiling_info,
3497 						tiling_flags, dcc, address,
3498 						force_disable_dcc);
3499 		if (ret)
3500 			return ret;
3501 	}
3502 
3503 	return 0;
3504 }
3505 
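/*
 * Derive DC blending attributes from the DRM plane state. Per-pixel alpha
 * is honoured only for overlay planes using the pre-multiplied blend mode
 * with an alpha-capable format; the 16-bit DRM plane alpha is scaled down
 * to the 8-bit value DC expects, e.g. plane_state->alpha == 0x8000 (~50%)
 * yields global_alpha_value == 0x80.
 */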
3506 static void
3507 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3508 			       bool *per_pixel_alpha, bool *global_alpha,
3509 			       int *global_alpha_value)
3510 {
3511 	*per_pixel_alpha = false;
3512 	*global_alpha = false;
3513 	*global_alpha_value = 0xff;
3514 
3515 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3516 		return;
3517 
3518 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3519 		static const uint32_t alpha_formats[] = {
3520 			DRM_FORMAT_ARGB8888,
3521 			DRM_FORMAT_RGBA8888,
3522 			DRM_FORMAT_ABGR8888,
3523 		};
3524 		uint32_t format = plane_state->fb->format->format;
3525 		unsigned int i;
3526 
3527 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3528 			if (format == alpha_formats[i]) {
3529 				*per_pixel_alpha = true;
3530 				break;
3531 			}
3532 		}
3533 	}
3534 
3535 	if (plane_state->alpha < 0xffff) {
3536 		*global_alpha = true;
3537 		*global_alpha_value = plane_state->alpha >> 8;
3538 	}
3539 }
3540 
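/*
 * Map the DRM color encoding/range plane properties onto a DC color space.
 * Only video (YCbCr) surfaces are affected; RGB surfaces stay sRGB, and
 * BT.2020 is accepted in full range only.
 */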
3541 static int
3542 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3543 			    const enum surface_pixel_format format,
3544 			    enum dc_color_space *color_space)
3545 {
3546 	bool full_range;
3547 
3548 	*color_space = COLOR_SPACE_SRGB;
3549 
3550 	/* DRM color properties only affect non-RGB formats. */
3551 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3552 		return 0;
3553 
3554 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3555 
3556 	switch (plane_state->color_encoding) {
3557 	case DRM_COLOR_YCBCR_BT601:
3558 		if (full_range)
3559 			*color_space = COLOR_SPACE_YCBCR601;
3560 		else
3561 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3562 		break;
3563 
3564 	case DRM_COLOR_YCBCR_BT709:
3565 		if (full_range)
3566 			*color_space = COLOR_SPACE_YCBCR709;
3567 		else
3568 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3569 		break;
3570 
3571 	case DRM_COLOR_YCBCR_BT2020:
3572 		if (full_range)
3573 			*color_space = COLOR_SPACE_2020_YCBCR;
3574 		else
3575 			return -EINVAL;
3576 		break;
3577 
3578 	default:
3579 		return -EINVAL;
3580 	}
3581 
3582 	return 0;
3583 }
3584 
3585 static int
3586 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3587 			    const struct drm_plane_state *plane_state,
3588 			    const uint64_t tiling_flags,
3589 			    struct dc_plane_info *plane_info,
3590 			    struct dc_plane_address *address,
3591 			    bool force_disable_dcc)
3592 {
3593 	const struct drm_framebuffer *fb = plane_state->fb;
3594 	const struct amdgpu_framebuffer *afb =
3595 		to_amdgpu_framebuffer(plane_state->fb);
3596 	struct drm_format_name_buf format_name;
3597 	int ret;
3598 
3599 	memset(plane_info, 0, sizeof(*plane_info));
3600 
3601 	switch (fb->format->format) {
3602 	case DRM_FORMAT_C8:
3603 		plane_info->format =
3604 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3605 		break;
3606 	case DRM_FORMAT_RGB565:
3607 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3608 		break;
3609 	case DRM_FORMAT_XRGB8888:
3610 	case DRM_FORMAT_ARGB8888:
3611 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3612 		break;
3613 	case DRM_FORMAT_XRGB2101010:
3614 	case DRM_FORMAT_ARGB2101010:
3615 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3616 		break;
3617 	case DRM_FORMAT_XBGR2101010:
3618 	case DRM_FORMAT_ABGR2101010:
3619 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3620 		break;
3621 	case DRM_FORMAT_XBGR8888:
3622 	case DRM_FORMAT_ABGR8888:
3623 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3624 		break;
3625 	case DRM_FORMAT_NV21:
3626 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3627 		break;
3628 	case DRM_FORMAT_NV12:
3629 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3630 		break;
3631 	case DRM_FORMAT_P010:
3632 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3633 		break;
3634 	default:
3635 		DRM_ERROR(
3636 			"Unsupported screen format %s\n",
3637 			drm_get_format_name(fb->format->format, &format_name));
3638 		return -EINVAL;
3639 	}
3640 
3641 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3642 	case DRM_MODE_ROTATE_0:
3643 		plane_info->rotation = ROTATION_ANGLE_0;
3644 		break;
3645 	case DRM_MODE_ROTATE_90:
3646 		plane_info->rotation = ROTATION_ANGLE_90;
3647 		break;
3648 	case DRM_MODE_ROTATE_180:
3649 		plane_info->rotation = ROTATION_ANGLE_180;
3650 		break;
3651 	case DRM_MODE_ROTATE_270:
3652 		plane_info->rotation = ROTATION_ANGLE_270;
3653 		break;
3654 	default:
3655 		plane_info->rotation = ROTATION_ANGLE_0;
3656 		break;
3657 	}
3658 
3659 	plane_info->visible = true;
3660 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3661 
3662 	plane_info->layer_index = 0;
3663 
3664 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3665 					  &plane_info->color_space);
3666 	if (ret)
3667 		return ret;
3668 
3669 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3670 					   plane_info->rotation, tiling_flags,
3671 					   &plane_info->tiling_info,
3672 					   &plane_info->plane_size,
3673 					   &plane_info->dcc, address,
3674 					   force_disable_dcc);
3675 	if (ret)
3676 		return ret;
3677 
3678 	fill_blending_from_plane_state(
3679 		plane_state, &plane_info->per_pixel_alpha,
3680 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3681 
3682 	return 0;
3683 }
3684 
3685 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3686 				    struct dc_plane_state *dc_plane_state,
3687 				    struct drm_plane_state *plane_state,
3688 				    struct drm_crtc_state *crtc_state)
3689 {
3690 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3691 	const struct amdgpu_framebuffer *amdgpu_fb =
3692 		to_amdgpu_framebuffer(plane_state->fb);
3693 	struct dc_scaling_info scaling_info;
3694 	struct dc_plane_info plane_info;
3695 	uint64_t tiling_flags;
3696 	int ret;
3697 	bool force_disable_dcc = false;
3698 
3699 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3700 	if (ret)
3701 		return ret;
3702 
3703 	dc_plane_state->src_rect = scaling_info.src_rect;
3704 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3705 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3706 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3707 
3708 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3709 	if (ret)
3710 		return ret;
3711 
3712 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3713 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3714 					  &plane_info,
3715 					  &dc_plane_state->address,
3716 					  force_disable_dcc);
3717 	if (ret)
3718 		return ret;
3719 
3720 	dc_plane_state->format = plane_info.format;
3721 	dc_plane_state->color_space = plane_info.color_space;
3723 	dc_plane_state->plane_size = plane_info.plane_size;
3724 	dc_plane_state->rotation = plane_info.rotation;
3725 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3726 	dc_plane_state->stereo_format = plane_info.stereo_format;
3727 	dc_plane_state->tiling_info = plane_info.tiling_info;
3728 	dc_plane_state->visible = plane_info.visible;
3729 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3730 	dc_plane_state->global_alpha = plane_info.global_alpha;
3731 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3732 	dc_plane_state->dcc = plane_info.dcc;
3733 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3734 
3735 	/*
3736 	 * Always set input transfer function, since plane state is refreshed
3737 	 * every time.
3738 	 */
3739 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3740 	if (ret)
3741 		return ret;
3742 
3743 	return 0;
3744 }
3745 
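/*
 * Build the stream's src/dst rects from the connector scaling property.
 * Illustrative example: a 1280x720 mode on a 1920x1200 native timing with
 * RMX_ASPECT yields dst = 1920x1080 centred at (0, 60), while RMX_CENTER
 * keeps dst = 1280x720 centred at (320, 240).
 */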
3746 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3747 					   const struct dm_connector_state *dm_state,
3748 					   struct dc_stream_state *stream)
3749 {
3750 	enum amdgpu_rmx_type rmx_type;
3751 
3752 	struct rect src = { 0 }; /* viewport in composition space*/
3753 	struct rect dst = { 0 }; /* stream addressable area */
3754 
3755 	/* no mode. nothing to be done */
3756 	if (!mode)
3757 		return;
3758 
3759 	/* Full screen scaling by default */
3760 	src.width = mode->hdisplay;
3761 	src.height = mode->vdisplay;
3762 	dst.width = stream->timing.h_addressable;
3763 	dst.height = stream->timing.v_addressable;
3764 
3765 	if (dm_state) {
3766 		rmx_type = dm_state->scaling;
3767 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3768 			if (src.width * dst.height <
3769 					src.height * dst.width) {
3770 				/* height needs less upscaling/more downscaling */
3771 				dst.width = src.width *
3772 						dst.height / src.height;
3773 			} else {
3774 				/* width needs less upscaling/more downscaling */
3775 				dst.height = src.height *
3776 						dst.width / src.width;
3777 			}
3778 		} else if (rmx_type == RMX_CENTER) {
3779 			dst = src;
3780 		}
3781 
3782 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3783 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3784 
3785 		if (dm_state->underscan_enable) {
3786 			dst.x += dm_state->underscan_hborder / 2;
3787 			dst.y += dm_state->underscan_vborder / 2;
3788 			dst.width -= dm_state->underscan_hborder;
3789 			dst.height -= dm_state->underscan_vborder;
3790 		}
3791 	}
3792 
3793 	stream->src = src;
3794 	stream->dst = dst;
3795 
3796 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3797 			dst.x, dst.y, dst.width, dst.height);
3798 
3799 }
3800 
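/*
 * Pick a color depth for the stream: start from the sink's EDID caps (or
 * the HDMI HF-VSDB deep-color bits for YCbCr 4:2:0), cap it by the
 * connector's max_requested_bpc property, then round down to an even bpc.
 * For example, a 12 bpc panel with max_requested_bpc == 10 ends up at
 * COLOR_DEPTH_101010.
 */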
3801 static enum dc_color_depth
3802 convert_color_depth_from_display_info(const struct drm_connector *connector,
3803 				      const struct drm_connector_state *state,
3804 				      bool is_y420)
3805 {
3806 	uint8_t bpc;
3807 
3808 	if (is_y420) {
3809 		bpc = 8;
3810 
3811 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3812 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3813 			bpc = 16;
3814 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3815 			bpc = 12;
3816 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3817 			bpc = 10;
3818 	} else {
3819 		bpc = (uint8_t)connector->display_info.bpc;
3820 		/* Assume 8 bpc by default if no bpc is specified. */
3821 		bpc = bpc ? bpc : 8;
3822 	}
3823 
3824 	if (!state)
3825 		state = connector->state;
3826 
3827 	if (state) {
3828 		/*
3829 		 * Cap display bpc based on the user requested value.
3830 		 *
3831 		 * The value for state->max_bpc may not be correctly updated
3832 		 * depending on when the connector gets added to the state
3833 		 * or if this was called outside of atomic check, so it
3834 		 * can't be used directly.
3835 		 */
3836 		bpc = min(bpc, state->max_requested_bpc);
3837 
3838 		/* Round down to the nearest even number. */
3839 		bpc = bpc - (bpc & 1);
3840 	}
3841 
3842 	switch (bpc) {
3843 	case 0:
3844 		/*
3845 		 * Temporary workaround: DRM doesn't parse color depth for
3846 		 * EDID revisions before 1.4.
3847 		 * TODO: Fix EDID parsing.
3848 		 */
3849 		return COLOR_DEPTH_888;
3850 	case 6:
3851 		return COLOR_DEPTH_666;
3852 	case 8:
3853 		return COLOR_DEPTH_888;
3854 	case 10:
3855 		return COLOR_DEPTH_101010;
3856 	case 12:
3857 		return COLOR_DEPTH_121212;
3858 	case 14:
3859 		return COLOR_DEPTH_141414;
3860 	case 16:
3861 		return COLOR_DEPTH_161616;
3862 	default:
3863 		return COLOR_DEPTH_UNDEFINED;
3864 	}
3865 }
3866 
3867 static enum dc_aspect_ratio
3868 get_aspect_ratio(const struct drm_display_mode *mode_in)
3869 {
3870 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3871 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3872 }
3873 
3874 static enum dc_color_space
3875 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3876 {
3877 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3878 
3879 	switch (dc_crtc_timing->pixel_encoding)	{
3880 	case PIXEL_ENCODING_YCBCR422:
3881 	case PIXEL_ENCODING_YCBCR444:
3882 	case PIXEL_ENCODING_YCBCR420:
3883 	{
3884 		/*
3885 		 * Per the HDMI spec, a pixel clock of 27030 kHz is the
3886 		 * separation point between HDTV and SDTV, so use YCbCr709
3887 		 * above it and YCbCr601 below it.
3888 		 */
3889 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3890 			if (dc_crtc_timing->flags.Y_ONLY)
3891 				color_space =
3892 					COLOR_SPACE_YCBCR709_LIMITED;
3893 			else
3894 				color_space = COLOR_SPACE_YCBCR709;
3895 		} else {
3896 			if (dc_crtc_timing->flags.Y_ONLY)
3897 				color_space =
3898 					COLOR_SPACE_YCBCR601_LIMITED;
3899 			else
3900 				color_space = COLOR_SPACE_YCBCR601;
3901 		}
3902 
3903 	}
3904 	break;
3905 	case PIXEL_ENCODING_RGB:
3906 		color_space = COLOR_SPACE_SRGB;
3907 		break;
3908 
3909 	default:
3910 		WARN_ON(1);
3911 		break;
3912 	}
3913 
3914 	return color_space;
3915 }
3916 
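/*
 * Step the color depth down until the depth-scaled pixel clock fits the
 * sink's TMDS limit. Illustrative example: a 594000 kHz 4k60 timing at
 * 12 bpc normalizes to 594000 * 36 / 24 = 891000 kHz; against a
 * max_tmds_clock of 600000 kHz this steps down through 10 bpc
 * (742500 kHz) to 8 bpc (594000 kHz), which fits.
 */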
3917 static bool adjust_colour_depth_from_display_info(
3918 	struct dc_crtc_timing *timing_out,
3919 	const struct drm_display_info *info)
3920 {
3921 	enum dc_color_depth depth = timing_out->display_color_depth;
3922 	int normalized_clk;
3923 	do {
3924 		normalized_clk = timing_out->pix_clk_100hz / 10;
3925 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3926 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3927 			normalized_clk /= 2;
3928 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
3929 		switch (depth) {
3930 		case COLOR_DEPTH_888:
3931 			break;
3932 		case COLOR_DEPTH_101010:
3933 			normalized_clk = (normalized_clk * 30) / 24;
3934 			break;
3935 		case COLOR_DEPTH_121212:
3936 			normalized_clk = (normalized_clk * 36) / 24;
3937 			break;
3938 		case COLOR_DEPTH_161616:
3939 			normalized_clk = (normalized_clk * 48) / 24;
3940 			break;
3941 		default:
3942 			/* The above depths are the only ones valid for HDMI. */
3943 			return false;
3944 		}
3945 		if (normalized_clk <= info->max_tmds_clock) {
3946 			timing_out->display_color_depth = depth;
3947 			return true;
3948 		}
3949 	} while (--depth > COLOR_DEPTH_666);
3950 	return false;
3951 }
3952 
3953 static void fill_stream_properties_from_drm_display_mode(
3954 	struct dc_stream_state *stream,
3955 	const struct drm_display_mode *mode_in,
3956 	const struct drm_connector *connector,
3957 	const struct drm_connector_state *connector_state,
3958 	const struct dc_stream_state *old_stream)
3959 {
3960 	struct dc_crtc_timing *timing_out = &stream->timing;
3961 	const struct drm_display_info *info = &connector->display_info;
3962 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3963 	struct hdmi_vendor_infoframe hv_frame;
3964 	struct hdmi_avi_infoframe avi_frame;
3965 
3966 	memset(&hv_frame, 0, sizeof(hv_frame));
3967 	memset(&avi_frame, 0, sizeof(avi_frame));
3968 
3969 	timing_out->h_border_left = 0;
3970 	timing_out->h_border_right = 0;
3971 	timing_out->v_border_top = 0;
3972 	timing_out->v_border_bottom = 0;
3973 	/* TODO: un-hardcode */
3974 	if (drm_mode_is_420_only(info, mode_in)
3975 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3976 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3977 	else if (drm_mode_is_420_also(info, mode_in)
3978 			&& aconnector->force_yuv420_output)
3979 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3980 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3981 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3982 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3983 	else
3984 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3985 
3986 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3987 	timing_out->display_color_depth = convert_color_depth_from_display_info(
3988 		connector, connector_state,
3989 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3990 	timing_out->scan_type = SCANNING_TYPE_NODATA;
3991 	timing_out->hdmi_vic = 0;
3992 
3993 	if (old_stream) {
3994 		timing_out->vic = old_stream->timing.vic;
3995 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3996 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3997 	} else {
3998 		timing_out->vic = drm_match_cea_mode(mode_in);
3999 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4000 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4001 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4002 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4003 	}
4004 
4005 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4006 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4007 		timing_out->vic = avi_frame.video_code;
4008 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4009 		timing_out->hdmi_vic = hv_frame.vic;
4010 	}
4011 
4012 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4013 	timing_out->h_total = mode_in->crtc_htotal;
4014 	timing_out->h_sync_width =
4015 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4016 	timing_out->h_front_porch =
4017 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4018 	timing_out->v_total = mode_in->crtc_vtotal;
4019 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4020 	timing_out->v_front_porch =
4021 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4022 	timing_out->v_sync_width =
4023 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4024 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4025 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4026 
4027 	stream->output_color_space = get_output_color_space(timing_out);
4028 
4029 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4030 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4031 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4032 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4033 		    drm_mode_is_420_also(info, mode_in) &&
4034 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4035 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4036 			adjust_colour_depth_from_display_info(timing_out, info);
4037 		}
4038 	}
4039 }
4040 
4041 static void fill_audio_info(struct audio_info *audio_info,
4042 			    const struct drm_connector *drm_connector,
4043 			    const struct dc_sink *dc_sink)
4044 {
4045 	int i = 0;
4046 	int cea_revision = 0;
4047 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4048 
4049 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4050 	audio_info->product_id = edid_caps->product_id;
4051 
4052 	cea_revision = drm_connector->display_info.cea_rev;
4053 
4054 	strscpy(audio_info->display_name,
4055 		edid_caps->display_name,
4056 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4057 
4058 	if (cea_revision >= 3) {
4059 		audio_info->mode_count = edid_caps->audio_mode_count;
4060 
4061 		for (i = 0; i < audio_info->mode_count; ++i) {
4062 			audio_info->modes[i].format_code =
4063 					(enum audio_format_code)
4064 					(edid_caps->audio_modes[i].format_code);
4065 			audio_info->modes[i].channel_count =
4066 					edid_caps->audio_modes[i].channel_count;
4067 			audio_info->modes[i].sample_rates.all =
4068 					edid_caps->audio_modes[i].sample_rate;
4069 			audio_info->modes[i].sample_size =
4070 					edid_caps->audio_modes[i].sample_size;
4071 		}
4072 	}
4073 
4074 	audio_info->flags.all = edid_caps->speaker_flags;
4075 
4076 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4077 	if (drm_connector->latency_present[0]) {
4078 		audio_info->video_latency = drm_connector->video_latency[0];
4079 		audio_info->audio_latency = drm_connector->audio_latency[0];
4080 	}
4081 
4082 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4083 
4084 }
4085 
4086 static void
4087 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4088 				      struct drm_display_mode *dst_mode)
4089 {
4090 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4091 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4092 	dst_mode->crtc_clock = src_mode->crtc_clock;
4093 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4094 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4095 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4096 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4097 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4098 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4099 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4100 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4101 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4102 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4103 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4104 }
4105 
4106 static void
4107 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4108 					const struct drm_display_mode *native_mode,
4109 					bool scale_enabled)
4110 {
4111 	if (scale_enabled) {
4112 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4113 	} else if (native_mode->clock == drm_mode->clock &&
4114 			native_mode->htotal == drm_mode->htotal &&
4115 			native_mode->vtotal == drm_mode->vtotal) {
4116 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4117 	} else {
4118 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4119 	}
4120 }
4121 
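/*
 * A virtual "fake" sink keeps a stream alive on a connector with no real
 * sink (e.g. headless or force-enabled links); its signal type is
 * overridden to SIGNAL_TYPE_VIRTUAL.
 */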
4122 static struct dc_sink *
4123 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4124 {
4125 	struct dc_sink_init_data sink_init_data = { 0 };
4126 	struct dc_sink *sink = NULL;
4127 	sink_init_data.link = aconnector->dc_link;
4128 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4129 
4130 	sink = dc_sink_create(&sink_init_data);
4131 	if (!sink) {
4132 		DRM_ERROR("Failed to create sink!\n");
4133 		return NULL;
4134 	}
4135 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4136 
4137 	return sink;
4138 }
4139 
4140 static void set_multisync_trigger_params(
4141 		struct dc_stream_state *stream)
4142 {
4143 	if (stream->triggered_crtc_reset.enabled) {
4144 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4145 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4146 	}
4147 }
4148 
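/*
 * Elect the stream with the highest refresh rate as the multisync master.
 * Refresh is derived from the timing as pix_clk / (h_total * v_total),
 * e.g. a CEA 1080p60 timing gives 148500000 / (2200 * 1125) = 60 Hz.
 */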
4149 static void set_master_stream(struct dc_stream_state *stream_set[],
4150 			      int stream_count)
4151 {
4152 	int j, highest_rfr = 0, master_stream = 0;
4153 
4154 	for (j = 0; j < stream_count; j++) {
4155 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4156 			int refresh_rate = 0;
4157 
4158 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
4159 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4160 			if (refresh_rate > highest_rfr) {
4161 				highest_rfr = refresh_rate;
4162 				master_stream = j;
4163 			}
4164 		}
4165 	}
4166 	for (j = 0; j < stream_count; j++) {
4167 		if (stream_set[j])
4168 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4169 	}
4170 }
4171 
4172 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4173 {
4174 	int i = 0;
4175 
4176 	if (context->stream_count < 2)
4177 		return;
4178 	for (i = 0; i < context->stream_count ; i++) {
4179 		if (!context->streams[i])
4180 			continue;
4181 		/*
4182 		 * TODO: add a function to read the AMD VSDB bits and set
4183 		 * the crtc_sync_master.multi_sync_enabled flag.
4184 		 * For now it is set to false.
4185 		 */
4186 		set_multisync_trigger_params(context->streams[i]);
4187 	}
4188 	set_master_stream(context->streams, context->stream_count);
4189 }
4190 
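/*
 * Build a dc_stream_state for a connector/mode pair: pick (or fake) a
 * sink, fill the timing from the DRM mode, enable DSC when the DPCD caps
 * and link bandwidth allow it, then attach scaling, audio and PSR
 * information.
 */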
4191 static struct dc_stream_state *
4192 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4193 		       const struct drm_display_mode *drm_mode,
4194 		       const struct dm_connector_state *dm_state,
4195 		       const struct dc_stream_state *old_stream)
4196 {
4197 	struct drm_display_mode *preferred_mode = NULL;
4198 	struct drm_connector *drm_connector;
4199 	const struct drm_connector_state *con_state =
4200 		dm_state ? &dm_state->base : NULL;
4201 	struct dc_stream_state *stream = NULL;
4202 	struct drm_display_mode mode = *drm_mode;
4203 	bool native_mode_found = false;
4204 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4205 	int mode_refresh;
4206 	int preferred_refresh = 0;
4207 #if defined(CONFIG_DRM_AMD_DC_DCN)
4208 	struct dsc_dec_dpcd_caps dsc_caps;
4209 #endif
4210 	uint32_t link_bandwidth_kbps;
4211 
4212 	struct dc_sink *sink = NULL;
4213 	if (aconnector == NULL) {
4214 		DRM_ERROR("aconnector is NULL!\n");
4215 		return stream;
4216 	}
4217 
4218 	drm_connector = &aconnector->base;
4219 
4220 	if (!aconnector->dc_sink) {
4221 		sink = create_fake_sink(aconnector);
4222 		if (!sink)
4223 			return stream;
4224 	} else {
4225 		sink = aconnector->dc_sink;
4226 		dc_sink_retain(sink);
4227 	}
4228 
4229 	stream = dc_create_stream_for_sink(sink);
4230 
4231 	if (stream == NULL) {
4232 		DRM_ERROR("Failed to create stream for sink!\n");
4233 		goto finish;
4234 	}
4235 
4236 	stream->dm_stream_context = aconnector;
4237 
4238 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4239 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4240 
4241 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4242 		/* Search for preferred mode */
4243 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4244 			native_mode_found = true;
4245 			break;
4246 		}
4247 	}
4248 	if (!native_mode_found)
4249 		preferred_mode = list_first_entry_or_null(
4250 				&aconnector->base.modes,
4251 				struct drm_display_mode,
4252 				head);
4253 
4254 	mode_refresh = drm_mode_vrefresh(&mode);
4255 
4256 	if (preferred_mode == NULL) {
4257 		/*
4258 		 * This may not be an error: it happens when there are no
4259 		 * usermode calls to reset and set the mode upon hotplug. In
4260 		 * that case, we call set mode ourselves to restore the
4261 		 * previous mode, and the mode list may not be filled in yet.
4262 		 */
4263 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4264 	} else {
4265 		decide_crtc_timing_for_drm_display_mode(
4266 				&mode, preferred_mode,
4267 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4268 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4269 	}
4270 
4271 	if (!dm_state)
4272 		drm_mode_set_crtcinfo(&mode, 0);
4273 
4274 	/*
4275 	 * If scaling is enabled and the refresh rate didn't change,
4276 	 * we copy the VIC and polarities of the old timings.
4277 	 */
4278 	if (!scale || mode_refresh != preferred_refresh)
4279 		fill_stream_properties_from_drm_display_mode(stream,
4280 			&mode, &aconnector->base, con_state, NULL);
4281 	else
4282 		fill_stream_properties_from_drm_display_mode(stream,
4283 			&mode, &aconnector->base, con_state, old_stream);
4284 
4285 	stream->timing.flags.DSC = 0;
4286 
4287 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4288 #if defined(CONFIG_DRM_AMD_DC_DCN)
4289 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4290 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4291 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4292 				      &dsc_caps);
4293 #endif
4294 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4295 							     dc_link_get_link_cap(aconnector->dc_link));
4296 
4297 #if defined(CONFIG_DRM_AMD_DC_DCN)
4298 		if (dsc_caps.is_dsc_supported)
4299 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4300 						  &dsc_caps,
4301 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4302 						  link_bandwidth_kbps,
4303 						  &stream->timing,
4304 						  &stream->timing.dsc_cfg))
4305 				stream->timing.flags.DSC = 1;
4306 #endif
4307 	}
4308 
4309 	update_stream_scaling_settings(&mode, dm_state, stream);
4310 
4311 	fill_audio_info(
4312 		&stream->audio_info,
4313 		drm_connector,
4314 		sink);
4315 
4316 	update_stream_signal(stream, sink);
4317 
4318 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4319 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4320 	if (stream->link->psr_feature_enabled) {
4321 		struct dc *core_dc = stream->link->ctx->dc;
4322 
4323 		if (dc_is_dmcu_initialized(core_dc)) {
4324 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4325 
4326 			stream->psr_version = dmcu->dmcu_version.psr_version;
4327 
4328 			/*
4329 			 * Decide whether the stream supports VSC SDP colorimetry
4330 			 * before building the VSC info packet.
4331 			 */
4332 			stream->use_vsc_sdp_for_colorimetry = false;
4333 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4334 				stream->use_vsc_sdp_for_colorimetry =
4335 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4336 			} else {
4337 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4338 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4339 					stream->use_vsc_sdp_for_colorimetry = true;
4340 				}
4341 			}
4342 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4343 		}
4344 	}
4345 finish:
4346 	dc_sink_release(sink);
4347 
4348 	return stream;
4349 }
4350 
4351 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4352 {
4353 	drm_crtc_cleanup(crtc);
4354 	kfree(crtc);
4355 }
4356 
4357 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4358 				  struct drm_crtc_state *state)
4359 {
4360 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4361 
4362 	/* TODO: destroy dc_stream objects once the stream object is flattened */
4363 	if (cur->stream)
4364 		dc_stream_release(cur->stream);
4365 
4366 	__drm_atomic_helper_crtc_destroy_state(state);
4367 
4368 	kfree(state);
4371 }
4372 
4373 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4374 {
4375 	struct dm_crtc_state *state;
4376 
4377 	if (crtc->state)
4378 		dm_crtc_destroy_state(crtc, crtc->state);
4379 
4380 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4381 	if (WARN_ON(!state))
4382 		return;
4383 
4384 	crtc->state = &state->base;
4385 	crtc->state->crtc = crtc;
4386 
4387 }
4388 
4389 static struct drm_crtc_state *
4390 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4391 {
4392 	struct dm_crtc_state *state, *cur;
4393 
4394 	if (WARN_ON(!crtc->state))
4395 		return NULL;
4396 
4397 	cur = to_dm_crtc_state(crtc->state);
4398 
4399 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4400 	if (!state)
4401 		return NULL;
4402 
4403 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4404 
4405 	if (cur->stream) {
4406 		state->stream = cur->stream;
4407 		dc_stream_retain(state->stream);
4408 	}
4409 
4410 	state->active_planes = cur->active_planes;
4411 	state->interrupts_enabled = cur->interrupts_enabled;
4412 	state->vrr_params = cur->vrr_params;
4413 	state->vrr_infopacket = cur->vrr_infopacket;
4414 	state->abm_level = cur->abm_level;
4415 	state->vrr_supported = cur->vrr_supported;
4416 	state->freesync_config = cur->freesync_config;
4417 	state->crc_src = cur->crc_src;
4418 	state->cm_has_degamma = cur->cm_has_degamma;
4419 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4420 
4421 	/* TODO: duplicate dc_stream once the stream object is flattened */
4422 
4423 	return &state->base;
4424 }
4425 
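/*
 * DC interrupt sources are laid out per OTG instance, so the source for a
 * given CRTC is the base IRQ type plus the CRTC's OTG instance (see
 * dm_set_vupdate_irq() and dm_set_vblank() below).
 */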
4426 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4427 {
4428 	enum dc_irq_source irq_source;
4429 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4430 	struct amdgpu_device *adev = crtc->dev->dev_private;
4431 	int rc;
4432 
4433 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4434 
4435 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4436 
4437 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4438 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4439 	return rc;
4440 }
4441 
4442 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4443 {
4444 	enum dc_irq_source irq_source;
4445 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4446 	struct amdgpu_device *adev = crtc->dev->dev_private;
4447 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4448 	int rc = 0;
4449 
4450 	if (enable) {
4451 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4452 		if (amdgpu_dm_vrr_active(acrtc_state))
4453 			rc = dm_set_vupdate_irq(crtc, true);
4454 	} else {
4455 		/* vblank irq off -> vupdate irq off */
4456 		rc = dm_set_vupdate_irq(crtc, false);
4457 	}
4458 
4459 	if (rc)
4460 		return rc;
4461 
4462 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4463 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4464 }
4465 
4466 static int dm_enable_vblank(struct drm_crtc *crtc)
4467 {
4468 	return dm_set_vblank(crtc, true);
4469 }
4470 
4471 static void dm_disable_vblank(struct drm_crtc *crtc)
4472 {
4473 	dm_set_vblank(crtc, false);
4474 }
4475 
4476 /* Implement only the options currently available for the driver */
4477 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4478 	.reset = dm_crtc_reset_state,
4479 	.destroy = amdgpu_dm_crtc_destroy,
4480 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4481 	.set_config = drm_atomic_helper_set_config,
4482 	.page_flip = drm_atomic_helper_page_flip,
4483 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4484 	.atomic_destroy_state = dm_crtc_destroy_state,
4485 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4486 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4487 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4488 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4489 	.enable_vblank = dm_enable_vblank,
4490 	.disable_vblank = dm_disable_vblank,
4491 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4492 };
4493 
4494 static enum drm_connector_status
4495 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4496 {
4497 	bool connected;
4498 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4499 
4500 	/*
4501 	 * Notes:
4502 	 * 1. This interface is NOT called in context of HPD irq.
4503 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4504 	 * makes it a bad place for *any* MST-related activity.
4505 	 */
4506 
4507 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4508 	    !aconnector->fake_enable)
4509 		connected = (aconnector->dc_sink != NULL);
4510 	else
4511 		connected = (aconnector->base.force == DRM_FORCE_ON);
4512 
4513 	return (connected ? connector_status_connected :
4514 			connector_status_disconnected);
4515 }
4516 
4517 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4518 					    struct drm_connector_state *connector_state,
4519 					    struct drm_property *property,
4520 					    uint64_t val)
4521 {
4522 	struct drm_device *dev = connector->dev;
4523 	struct amdgpu_device *adev = dev->dev_private;
4524 	struct dm_connector_state *dm_old_state =
4525 		to_dm_connector_state(connector->state);
4526 	struct dm_connector_state *dm_new_state =
4527 		to_dm_connector_state(connector_state);
4528 
4529 	int ret = -EINVAL;
4530 
4531 	if (property == dev->mode_config.scaling_mode_property) {
4532 		enum amdgpu_rmx_type rmx_type;
4533 
4534 		switch (val) {
4535 		case DRM_MODE_SCALE_CENTER:
4536 			rmx_type = RMX_CENTER;
4537 			break;
4538 		case DRM_MODE_SCALE_ASPECT:
4539 			rmx_type = RMX_ASPECT;
4540 			break;
4541 		case DRM_MODE_SCALE_FULLSCREEN:
4542 			rmx_type = RMX_FULL;
4543 			break;
4544 		case DRM_MODE_SCALE_NONE:
4545 		default:
4546 			rmx_type = RMX_OFF;
4547 			break;
4548 		}
4549 
4550 		if (dm_old_state->scaling == rmx_type)
4551 			return 0;
4552 
4553 		dm_new_state->scaling = rmx_type;
4554 		ret = 0;
4555 	} else if (property == adev->mode_info.underscan_hborder_property) {
4556 		dm_new_state->underscan_hborder = val;
4557 		ret = 0;
4558 	} else if (property == adev->mode_info.underscan_vborder_property) {
4559 		dm_new_state->underscan_vborder = val;
4560 		ret = 0;
4561 	} else if (property == adev->mode_info.underscan_property) {
4562 		dm_new_state->underscan_enable = val;
4563 		ret = 0;
4564 	} else if (property == adev->mode_info.abm_level_property) {
4565 		dm_new_state->abm_level = val;
4566 		ret = 0;
4567 	}
4568 
4569 	return ret;
4570 }
4571 
4572 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4573 					    const struct drm_connector_state *state,
4574 					    struct drm_property *property,
4575 					    uint64_t *val)
4576 {
4577 	struct drm_device *dev = connector->dev;
4578 	struct amdgpu_device *adev = dev->dev_private;
4579 	struct dm_connector_state *dm_state =
4580 		to_dm_connector_state(state);
4581 	int ret = -EINVAL;
4582 
4583 	if (property == dev->mode_config.scaling_mode_property) {
4584 		switch (dm_state->scaling) {
4585 		case RMX_CENTER:
4586 			*val = DRM_MODE_SCALE_CENTER;
4587 			break;
4588 		case RMX_ASPECT:
4589 			*val = DRM_MODE_SCALE_ASPECT;
4590 			break;
4591 		case RMX_FULL:
4592 			*val = DRM_MODE_SCALE_FULLSCREEN;
4593 			break;
4594 		case RMX_OFF:
4595 		default:
4596 			*val = DRM_MODE_SCALE_NONE;
4597 			break;
4598 		}
4599 		ret = 0;
4600 	} else if (property == adev->mode_info.underscan_hborder_property) {
4601 		*val = dm_state->underscan_hborder;
4602 		ret = 0;
4603 	} else if (property == adev->mode_info.underscan_vborder_property) {
4604 		*val = dm_state->underscan_vborder;
4605 		ret = 0;
4606 	} else if (property == adev->mode_info.underscan_property) {
4607 		*val = dm_state->underscan_enable;
4608 		ret = 0;
4609 	} else if (property == adev->mode_info.abm_level_property) {
4610 		*val = dm_state->abm_level;
4611 		ret = 0;
4612 	}
4613 
4614 	return ret;
4615 }
4616 
4617 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4618 {
4619 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4620 
4621 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4622 }
4623 
4624 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4625 {
4626 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4627 	const struct dc_link *link = aconnector->dc_link;
4628 	struct amdgpu_device *adev = connector->dev->dev_private;
4629 	struct amdgpu_display_manager *dm = &adev->dm;
4630 
4631 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4632 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4633 
4634 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4635 	    link->type != dc_connection_none &&
4636 	    dm->backlight_dev) {
4637 		backlight_device_unregister(dm->backlight_dev);
4638 		dm->backlight_dev = NULL;
4639 	}
4640 #endif
4641 
4642 	if (aconnector->dc_em_sink)
4643 		dc_sink_release(aconnector->dc_em_sink);
4644 	aconnector->dc_em_sink = NULL;
4645 	if (aconnector->dc_sink)
4646 		dc_sink_release(aconnector->dc_sink);
4647 	aconnector->dc_sink = NULL;
4648 
4649 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4650 	drm_connector_unregister(connector);
4651 	drm_connector_cleanup(connector);
4652 	if (aconnector->i2c) {
4653 		i2c_del_adapter(&aconnector->i2c->base);
4654 		kfree(aconnector->i2c);
4655 	}
4656 	kfree(aconnector->dm_dp_aux.aux.name);
4657 
4658 	kfree(connector);
4659 }
4660 
4661 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4662 {
4663 	struct dm_connector_state *state =
4664 		to_dm_connector_state(connector->state);
4665 
4666 	if (connector->state)
4667 		__drm_atomic_helper_connector_destroy_state(connector->state);
4668 
4669 	kfree(state);
4670 
4671 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4672 
4673 	if (state) {
4674 		state->scaling = RMX_OFF;
4675 		state->underscan_enable = false;
4676 		state->underscan_hborder = 0;
4677 		state->underscan_vborder = 0;
4678 		state->base.max_requested_bpc = 8;
4679 		state->vcpi_slots = 0;
4680 		state->pbn = 0;
4681 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4682 			state->abm_level = amdgpu_dm_abm_level;
4683 
4684 		__drm_atomic_helper_connector_reset(connector, &state->base);
4685 	}
4686 }
4687 
4688 struct drm_connector_state *
4689 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4690 {
4691 	struct dm_connector_state *state =
4692 		to_dm_connector_state(connector->state);
4693 
4694 	struct dm_connector_state *new_state =
4695 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4696 
4697 	if (!new_state)
4698 		return NULL;
4699 
4700 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4701 
4702 	new_state->freesync_capable = state->freesync_capable;
4703 	new_state->abm_level = state->abm_level;
4704 	new_state->scaling = state->scaling;
4705 	new_state->underscan_enable = state->underscan_enable;
4706 	new_state->underscan_hborder = state->underscan_hborder;
4707 	new_state->underscan_vborder = state->underscan_vborder;
4708 	new_state->vcpi_slots = state->vcpi_slots;
4709 	new_state->pbn = state->pbn;
4710 	return &new_state->base;
4711 }
4712 
4713 static int
4714 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4715 {
4716 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4717 		to_amdgpu_dm_connector(connector);
4718 	int r;
4719 
4720 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4721 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4722 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4723 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4724 		if (r)
4725 			return r;
4726 	}
4727 
4728 #if defined(CONFIG_DEBUG_FS)
4729 	connector_debugfs_init(amdgpu_dm_connector);
4730 #endif
4731 
4732 	return 0;
4733 }
4734 
4735 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4736 	.reset = amdgpu_dm_connector_funcs_reset,
4737 	.detect = amdgpu_dm_connector_detect,
4738 	.fill_modes = drm_helper_probe_single_connector_modes,
4739 	.destroy = amdgpu_dm_connector_destroy,
4740 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4741 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4742 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4743 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4744 	.late_register = amdgpu_dm_connector_late_register,
4745 	.early_unregister = amdgpu_dm_connector_unregister
4746 };
4747 
4748 static int get_modes(struct drm_connector *connector)
4749 {
4750 	return amdgpu_dm_connector_get_modes(connector);
4751 }
4752 
4753 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4754 {
4755 	struct dc_sink_init_data init_params = {
4756 			.link = aconnector->dc_link,
4757 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4758 	};
4759 	struct edid *edid;
4760 
4761 	if (!aconnector->base.edid_blob_ptr) {
4762 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4763 				aconnector->base.name);
4764 
4765 		aconnector->base.force = DRM_FORCE_OFF;
4766 		aconnector->base.override_edid = false;
4767 		return;
4768 	}
4769 
4770 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4771 
4772 	aconnector->edid = edid;
4773 
4774 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4775 		aconnector->dc_link,
4776 		(uint8_t *)edid,
4777 		(edid->extensions + 1) * EDID_LENGTH,
4778 		&init_params);
4779 
4780 	if (aconnector->base.force == DRM_FORCE_ON) {
4781 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4782 		aconnector->dc_link->local_sink :
4783 		aconnector->dc_em_sink;
4784 		dc_sink_retain(aconnector->dc_sink);
4785 	}
4786 }
4787 
4788 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4789 {
4790 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4791 
4792 	/*
4793 	 * In case of a headless boot with force on for a DP-managed
4794 	 * connector, these settings have to be != 0 to get an initial modeset.
4795 	 */
4796 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4797 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4798 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4799 	}
4800 
4801 
4802 	aconnector->base.override_edid = true;
4803 	create_eml_sink(aconnector);
4804 }
4805 
4806 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4807 				   struct drm_display_mode *mode)
4808 {
4809 	int result = MODE_ERROR;
4810 	struct dc_sink *dc_sink;
4811 	struct amdgpu_device *adev = connector->dev->dev_private;
4812 	/* TODO: Unhardcode stream count */
4813 	struct dc_stream_state *stream;
4814 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4815 	enum dc_status dc_result = DC_OK;
4816 
4817 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4818 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4819 		return result;
4820 
4821 	/*
4822 	 * Only run this the first time mode_valid is called, to initialize
4823 	 * EDID management.
4824 	 */
4825 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4826 		!aconnector->dc_em_sink)
4827 		handle_edid_mgmt(aconnector);
4828 
4829 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4830 
4831 	if (dc_sink == NULL) {
4832 		DRM_ERROR("dc_sink is NULL!\n");
4833 		goto fail;
4834 	}
4835 
4836 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4837 	if (stream == NULL) {
4838 		DRM_ERROR("Failed to create stream for sink!\n");
4839 		goto fail;
4840 	}
4841 
4842 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4843 
4844 	if (dc_result == DC_OK)
4845 		result = MODE_OK;
4846 	else
4847 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4848 			      mode->hdisplay,
4849 			      mode->vdisplay,
4850 			      mode->clock,
4851 			      dc_result);
4852 
4853 	dc_stream_release(stream);
4854 
4855 fail:
4856 	/* TODO: error handling */
4857 	return result;
4858 }
4859 
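/*
 * Pack the connector's HDR output metadata into a DC info packet. The
 * packed Dynamic Range and Mastering infoframe is a fixed 4-byte header
 * plus 26-byte payload; HDMI carries it with its checksum in sb[0], while
 * DP re-wraps the payload in an SDP header.
 */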
4860 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4861 				struct dc_info_packet *out)
4862 {
4863 	struct hdmi_drm_infoframe frame;
4864 	unsigned char buf[30]; /* 26 + 4 */
4865 	ssize_t len;
4866 	int ret, i;
4867 
4868 	memset(out, 0, sizeof(*out));
4869 
4870 	if (!state->hdr_output_metadata)
4871 		return 0;
4872 
4873 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4874 	if (ret)
4875 		return ret;
4876 
4877 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4878 	if (len < 0)
4879 		return (int)len;
4880 
4881 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4882 	if (len != 30)
4883 		return -EINVAL;
4884 
4885 	/* Prepare the infopacket for DC. */
4886 	switch (state->connector->connector_type) {
4887 	case DRM_MODE_CONNECTOR_HDMIA:
4888 		out->hb0 = 0x87; /* type */
4889 		out->hb1 = 0x01; /* version */
4890 		out->hb2 = 0x1A; /* length */
4891 		out->sb[0] = buf[3]; /* checksum */
4892 		i = 1;
4893 		break;
4894 
4895 	case DRM_MODE_CONNECTOR_DisplayPort:
4896 	case DRM_MODE_CONNECTOR_eDP:
4897 		out->hb0 = 0x00; /* sdp id, zero */
4898 		out->hb1 = 0x87; /* type */
4899 		out->hb2 = 0x1D; /* payload len - 1 */
4900 		out->hb3 = (0x13 << 2); /* sdp version */
4901 		out->sb[0] = 0x01; /* version */
4902 		out->sb[1] = 0x1A; /* length */
4903 		i = 2;
4904 		break;
4905 
4906 	default:
4907 		return -EINVAL;
4908 	}
4909 
4910 	memcpy(&out->sb[i], &buf[4], 26);
4911 	out->valid = true;
4912 
4913 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4914 		       sizeof(out->sb), false);
4915 
4916 	return 0;
4917 }
4918 
4919 static bool
4920 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4921 			  const struct drm_connector_state *new_state)
4922 {
4923 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4924 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4925 
4926 	if (old_blob != new_blob) {
4927 		if (old_blob && new_blob &&
4928 		    old_blob->length == new_blob->length)
4929 			return memcmp(old_blob->data, new_blob->data,
4930 				      old_blob->length);
4931 
4932 		return true;
4933 	}
4934 
4935 	return false;
4936 }
4937 
4938 static int
4939 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4940 				 struct drm_atomic_state *state)
4941 {
4942 	struct drm_connector_state *new_con_state =
4943 		drm_atomic_get_new_connector_state(state, conn);
4944 	struct drm_connector_state *old_con_state =
4945 		drm_atomic_get_old_connector_state(state, conn);
4946 	struct drm_crtc *crtc = new_con_state->crtc;
4947 	struct drm_crtc_state *new_crtc_state;
4948 	int ret;
4949 
4950 	if (!crtc)
4951 		return 0;
4952 
4953 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4954 		struct dc_info_packet hdr_infopacket;
4955 
4956 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4957 		if (ret)
4958 			return ret;
4959 
4960 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4961 		if (IS_ERR(new_crtc_state))
4962 			return PTR_ERR(new_crtc_state);
4963 
4964 		/*
4965 		 * DC considers the stream backends changed if the
4966 		 * static metadata changes. Forcing the modeset also
4967 		 * gives a simple way for userspace to switch from
4968 		 * 8bpc to 10bpc when setting the metadata to enter
4969 		 * or exit HDR.
4970 		 *
4971 		 * Changing the static metadata after it's been
4972 		 * set is permissible, however. So only force a
4973 		 * modeset if we're entering or exiting HDR.
4974 		 */
4975 		new_crtc_state->mode_changed =
4976 			!old_con_state->hdr_output_metadata ||
4977 			!new_con_state->hdr_output_metadata;
4978 	}
4979 
4980 	return 0;
4981 }
4982 
4983 static const struct drm_connector_helper_funcs
4984 amdgpu_dm_connector_helper_funcs = {
4985 	/*
4986 	 * When hotplugging a second, bigger display in fbcon mode, the bigger
4987 	 * resolution modes are filtered out by drm_mode_validate_size() and
4988 	 * end up missing after the user starts lightdm. So we need to renew
4989 	 * the mode list in the get_modes callback, not just return its count.
4990 	 */
4991 	.get_modes = get_modes,
4992 	.mode_valid = amdgpu_dm_connector_mode_valid,
4993 	.atomic_check = amdgpu_dm_connector_atomic_check,
4994 };
4995 
4996 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4997 {
4998 }
4999 
5000 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5001 {
5002 	struct drm_device *dev = new_crtc_state->crtc->dev;
5003 	struct drm_plane *plane;
5004 
5005 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5006 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5007 			return true;
5008 	}
5009 
5010 	return false;
5011 }
5012 
5013 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5014 {
5015 	struct drm_atomic_state *state = new_crtc_state->state;
5016 	struct drm_plane *plane;
5017 	int num_active = 0;
5018 
5019 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5020 		struct drm_plane_state *new_plane_state;
5021 
5022 		/* Cursor planes are "fake". */
5023 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5024 			continue;
5025 
5026 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5027 
5028 		if (!new_plane_state) {
5029 			/*
5030 			 * The plane is enabled on the CRTC and hasn't changed
5031 			 * state. This means that it previously passed
5032 			 * validation and is therefore enabled.
5033 			 */
5034 			num_active += 1;
5035 			continue;
5036 		}
5037 
5038 		/* We need a framebuffer to be considered enabled. */
5039 		num_active += (new_plane_state->fb != NULL);
5040 	}
5041 
5042 	return num_active;
5043 }
5044 
5045 /*
5046  * Sets whether interrupts should be enabled on a specific CRTC.
5047  * We require that the stream be enabled and that there exist active
5048  * DC planes on the stream.
5049  */
5050 static void
5051 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5052 			       struct drm_crtc_state *new_crtc_state)
5053 {
5054 	struct dm_crtc_state *dm_new_crtc_state =
5055 		to_dm_crtc_state(new_crtc_state);
5056 
5057 	dm_new_crtc_state->active_planes = 0;
5058 	dm_new_crtc_state->interrupts_enabled = false;
5059 
5060 	if (!dm_new_crtc_state->stream)
5061 		return;
5062 
5063 	dm_new_crtc_state->active_planes =
5064 		count_crtc_active_planes(new_crtc_state);
5065 
5066 	dm_new_crtc_state->interrupts_enabled =
5067 		dm_new_crtc_state->active_planes > 0;
5068 }
5069 
5070 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5071 				       struct drm_crtc_state *state)
5072 {
5073 	struct amdgpu_device *adev = crtc->dev->dev_private;
5074 	struct dc *dc = adev->dm.dc;
5075 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5076 	int ret = -EINVAL;
5077 
5078 	/*
5079 	 * Update interrupt state for the CRTC. This needs to happen whenever
5080 	 * the CRTC has changed or whenever any of its planes have changed.
5081 	 * Atomic check satisfies both of these requirements since the CRTC
5082 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5083 	 */
5084 	dm_update_crtc_interrupt_state(crtc, state);
5085 
5086 	if (unlikely(!dm_crtc_state->stream &&
5087 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5088 		WARN_ON(1);
5089 		return ret;
5090 	}
5091 
5092 	/* In some use cases, like reset, no stream is attached */
5093 	if (!dm_crtc_state->stream)
5094 		return 0;
5095 
5096 	/*
5097 	 * We want at least one hardware plane enabled to use
5098 	 * the stream with a cursor enabled.
5099 	 */
5100 	if (state->enable && state->active &&
5101 	    does_crtc_have_active_cursor(state) &&
5102 	    dm_crtc_state->active_planes == 0)
5103 		return -EINVAL;
5104 
5105 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5106 		return 0;
5107 
5108 	return ret;
5109 }
5110 
5111 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5112 				      const struct drm_display_mode *mode,
5113 				      struct drm_display_mode *adjusted_mode)
5114 {
5115 	return true;
5116 }
5117 
5118 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5119 	.disable = dm_crtc_helper_disable,
5120 	.atomic_check = dm_crtc_helper_atomic_check,
5121 	.mode_fixup = dm_crtc_helper_mode_fixup,
5122 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5123 };
5124 
5125 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5126 {
5127 
5128 }
5129 
5130 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5131 {
5132 	switch (display_color_depth) {
5133 	case COLOR_DEPTH_666:
5134 		return 6;
5135 	case COLOR_DEPTH_888:
5136 		return 8;
5137 	case COLOR_DEPTH_101010:
5138 		return 10;
5139 	case COLOR_DEPTH_121212:
5140 		return 12;
5141 	case COLOR_DEPTH_141414:
5142 		return 14;
5143 	case COLOR_DEPTH_161616:
5144 		return 16;
5145 	default:
5146 		break;
5147 	}
5148 	return 0;
5149 }
5150 
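/*
 * For MST connectors, convert the mode's bandwidth into DP MST PBN and
 * reserve VCPI slots at atomic-check time. As a rough illustration,
 * 1080p60 at 24 bpp carries ~445.5 MB/s of pixel data, which
 * drm_dp_calc_pbn_mode() turns into units of 54/64 MB/s plus a small
 * margin, on the order of 530 PBN.
 */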
5151 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5152 					  struct drm_crtc_state *crtc_state,
5153 					  struct drm_connector_state *conn_state)
5154 {
5155 	struct drm_atomic_state *state = crtc_state->state;
5156 	struct drm_connector *connector = conn_state->connector;
5157 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5158 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5159 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5160 	struct drm_dp_mst_topology_mgr *mst_mgr;
5161 	struct drm_dp_mst_port *mst_port;
5162 	enum dc_color_depth color_depth;
5163 	int clock, bpp = 0;
5164 	bool is_y420 = false;
5165 
5166 	if (!aconnector->port || !aconnector->dc_sink)
5167 		return 0;
5168 
5169 	mst_port = aconnector->port;
5170 	mst_mgr = &aconnector->mst_port->mst_mgr;
5171 
5172 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5173 		return 0;
5174 
5175 	if (!state->duplicated) {
5176 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5177 				aconnector->force_yuv420_output;
5178 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5179 								    is_y420);
5180 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5181 		clock = adjusted_mode->clock;
5182 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5183 	}
5184 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5185 									   mst_mgr,
5186 									   mst_port,
5187 									   dm_new_connector_state->pbn,
5188 									   0);
5189 	if (dm_new_connector_state->vcpi_slots < 0) {
5190 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5191 		return dm_new_connector_state->vcpi_slots;
5192 	}
5193 	return 0;
5194 }
5195 
5196 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5197 	.disable = dm_encoder_helper_disable,
5198 	.atomic_check = dm_encoder_helper_atomic_check
5199 };
5200 
5201 #if defined(CONFIG_DRM_AMD_DC_DCN)
5202 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5203 					    struct dc_state *dc_state)
5204 {
5205 	struct dc_stream_state *stream = NULL;
5206 	struct drm_connector *connector;
5207 	struct drm_connector_state *new_con_state, *old_con_state;
5208 	struct amdgpu_dm_connector *aconnector;
5209 	struct dm_connector_state *dm_conn_state;
5210 	int i, j, clock, bpp;
5211 	int vcpi, pbn_div, pbn = 0;
5212 
5213 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5214 
5215 		aconnector = to_amdgpu_dm_connector(connector);
5216 
5217 		if (!aconnector->port)
5218 			continue;
5219 
5220 		if (!new_con_state || !new_con_state->crtc)
5221 			continue;
5222 
5223 		dm_conn_state = to_dm_connector_state(new_con_state);
5224 
5225 		for (j = 0; j < dc_state->stream_count; j++) {
5226 			stream = dc_state->streams[j];
5227 			if (!stream)
5228 				continue;
5229 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5231 				break;
5232 
5233 			stream = NULL;
5234 		}
5235 
5236 		if (!stream)
5237 			continue;
5238 
5239 		if (stream->timing.flags.DSC != 1) {
5240 			drm_dp_mst_atomic_enable_dsc(state,
5241 						     aconnector->port,
5242 						     dm_conn_state->pbn,
5243 						     0,
5244 						     false);
5245 			continue;
5246 		}
5247 
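		/*
		 * DSC is enabled on this stream: recompute the PBN from the
		 * compressed rate and enable DSC on the MST port. Note that
		 * dsc_cfg.bits_per_pixel is in units of 1/16 bpp here, which
		 * is what drm_dp_calc_pbn_mode() assumes when its DSC flag
		 * is set.
		 */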
5248 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5249 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5250 		clock = stream->timing.pix_clk_100hz / 10;
5251 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5252 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5253 						    aconnector->port,
5254 						    pbn, pbn_div,
5255 						    true);
5256 		if (vcpi < 0)
5257 			return vcpi;
5258 
5259 		dm_conn_state->pbn = pbn;
5260 		dm_conn_state->vcpi_slots = vcpi;
5261 	}
5262 	return 0;
5263 }
5264 #endif
5265 
5266 static void dm_drm_plane_reset(struct drm_plane *plane)
5267 {
5268 	struct dm_plane_state *amdgpu_state = NULL;
5269 
5270 	if (plane->state)
5271 		plane->funcs->atomic_destroy_state(plane, plane->state);
5272 
5273 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5274 	WARN_ON(amdgpu_state == NULL);
5275 
5276 	if (amdgpu_state)
5277 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5278 }
5279 
5280 static struct drm_plane_state *
5281 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5282 {
5283 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5284 
5285 	old_dm_plane_state = to_dm_plane_state(plane->state);
5286 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5287 	if (!dm_plane_state)
5288 		return NULL;
5289 
5290 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5291 
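	/*
	 * Share the dc_plane_state with the state it was duplicated from;
	 * take an extra reference so it stays alive until both DRM plane
	 * states are destroyed.
	 */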
5292 	if (old_dm_plane_state->dc_state) {
5293 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5294 		dc_plane_state_retain(dm_plane_state->dc_state);
5295 	}
5296 
5297 	return &dm_plane_state->base;
5298 }
5299 
5300 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5301 				struct drm_plane_state *state)
5302 {
5303 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5304 
5305 	if (dm_plane_state->dc_state)
5306 		dc_plane_state_release(dm_plane_state->dc_state);
5307 
5308 	drm_atomic_helper_plane_destroy_state(plane, state);
5309 }
5310 
5311 static const struct drm_plane_funcs dm_plane_funcs = {
5312 	.update_plane	= drm_atomic_helper_update_plane,
5313 	.disable_plane	= drm_atomic_helper_disable_plane,
5314 	.destroy	= drm_primary_helper_destroy,
5315 	.reset = dm_drm_plane_reset,
5316 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5317 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5318 };
5319 
5320 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5321 				      struct drm_plane_state *new_state)
5322 {
5323 	struct amdgpu_framebuffer *afb;
5324 	struct drm_gem_object *obj;
5325 	struct amdgpu_device *adev;
5326 	struct amdgpu_bo *rbo;
5327 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5328 	struct list_head list;
5329 	struct ttm_validate_buffer tv;
5330 	struct ww_acquire_ctx ticket;
5331 	uint64_t tiling_flags;
5332 	uint32_t domain;
5333 	int r;
5334 	bool force_disable_dcc = false;
5335 
5336 	dm_plane_state_old = to_dm_plane_state(plane->state);
5337 	dm_plane_state_new = to_dm_plane_state(new_state);
5338 
5339 	if (!new_state->fb) {
5340 		DRM_DEBUG_DRIVER("No FB bound\n");
5341 		return 0;
5342 	}
5343 
5344 	afb = to_amdgpu_framebuffer(new_state->fb);
5345 	obj = new_state->fb->obj[0];
5346 	rbo = gem_to_amdgpu_bo(obj);
5347 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5348 	INIT_LIST_HEAD(&list);
5349 
5350 	tv.bo = &rbo->tbo;
5351 	tv.num_shared = 1;
5352 	list_add(&tv.head, &list);
5353 
5354 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5355 	if (r) {
		dev_err(adev->dev, "failed to reserve buffer object (%d)\n", r);
5357 		return r;
5358 	}
5359 
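	/*
	 * Cursor buffers must be pinned in VRAM; other plane types may also
	 * scan out of GTT where amdgpu_display_supported_domains() allows it.
	 */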
5360 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5361 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5362 	else
5363 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5364 
5365 	r = amdgpu_bo_pin(rbo, domain);
5366 	if (unlikely(r != 0)) {
5367 		if (r != -ERESTARTSYS)
5368 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5369 		ttm_eu_backoff_reservation(&ticket, &list);
5370 		return r;
5371 	}
5372 
5373 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5374 	if (unlikely(r != 0)) {
5375 		amdgpu_bo_unpin(rbo);
5376 		ttm_eu_backoff_reservation(&ticket, &list);
5377 		DRM_ERROR("%p bind failed\n", rbo);
5378 		return r;
5379 	}
5380 
5381 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5382 
5383 	ttm_eu_backoff_reservation(&ticket, &list);
5384 
5385 	afb->address = amdgpu_bo_gpu_offset(rbo);
5386 
5387 	amdgpu_bo_ref(rbo);
5388 
5389 	if (dm_plane_state_new->dc_state &&
5390 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5391 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5392 
5393 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5394 		fill_plane_buffer_attributes(
5395 			adev, afb, plane_state->format, plane_state->rotation,
5396 			tiling_flags, &plane_state->tiling_info,
5397 			&plane_state->plane_size, &plane_state->dcc,
5398 			&plane_state->address,
5399 			force_disable_dcc);
5400 	}
5401 
5402 	return 0;
5403 }
5404 
5405 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5406 				       struct drm_plane_state *old_state)
5407 {
5408 	struct amdgpu_bo *rbo;
5409 	int r;
5410 
5411 	if (!old_state->fb)
5412 		return;
5413 
5414 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5415 	r = amdgpu_bo_reserve(rbo, false);
5416 	if (unlikely(r)) {
5417 		DRM_ERROR("failed to reserve rbo before unpin\n");
5418 		return;
5419 	}
5420 
5421 	amdgpu_bo_unpin(rbo);
5422 	amdgpu_bo_unreserve(rbo);
5423 	amdgpu_bo_unref(&rbo);
5424 }
5425 
5426 static int dm_plane_atomic_check(struct drm_plane *plane,
5427 				 struct drm_plane_state *state)
5428 {
5429 	struct amdgpu_device *adev = plane->dev->dev_private;
5430 	struct dc *dc = adev->dm.dc;
5431 	struct dm_plane_state *dm_plane_state;
5432 	struct dc_scaling_info scaling_info;
5433 	int ret;
5434 
5435 	dm_plane_state = to_dm_plane_state(state);
5436 
5437 	if (!dm_plane_state->dc_state)
5438 		return 0;
5439 
5440 	ret = fill_dc_scaling_info(state, &scaling_info);
5441 	if (ret)
5442 		return ret;
5443 
5444 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5445 		return 0;
5446 
5447 	return -EINVAL;
5448 }
5449 
5450 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5451 				       struct drm_plane_state *new_plane_state)
5452 {
5453 	/* Only support async updates on cursor planes. */
5454 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5455 		return -EINVAL;
5456 
5457 	return 0;
5458 }
5459 
5460 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5461 					 struct drm_plane_state *new_state)
5462 {
5463 	struct drm_plane_state *old_state =
5464 		drm_atomic_get_old_plane_state(new_state->state, plane);
5465 
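	/*
	 * Async updates bypass the regular atomic state swap, so manually
	 * carry the framebuffer and the src/dst rectangles over into the
	 * committed plane state before programming the cursor.
	 */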
5466 	swap(plane->state->fb, new_state->fb);
5467 
5468 	plane->state->src_x = new_state->src_x;
5469 	plane->state->src_y = new_state->src_y;
5470 	plane->state->src_w = new_state->src_w;
5471 	plane->state->src_h = new_state->src_h;
5472 	plane->state->crtc_x = new_state->crtc_x;
5473 	plane->state->crtc_y = new_state->crtc_y;
5474 	plane->state->crtc_w = new_state->crtc_w;
5475 	plane->state->crtc_h = new_state->crtc_h;
5476 
5477 	handle_cursor_update(plane, old_state);
5478 }
5479 
5480 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5481 	.prepare_fb = dm_plane_helper_prepare_fb,
5482 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5483 	.atomic_check = dm_plane_atomic_check,
5484 	.atomic_async_check = dm_plane_atomic_async_check,
5485 	.atomic_async_update = dm_plane_atomic_async_update
5486 };
5487 
/*
 * TODO: These are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal DRM check will succeed, and let DC implement the proper check.
 */
5494 static const uint32_t rgb_formats[] = {
5495 	DRM_FORMAT_XRGB8888,
5496 	DRM_FORMAT_ARGB8888,
5497 	DRM_FORMAT_RGBA8888,
5498 	DRM_FORMAT_XRGB2101010,
5499 	DRM_FORMAT_XBGR2101010,
5500 	DRM_FORMAT_ARGB2101010,
5501 	DRM_FORMAT_ABGR2101010,
5502 	DRM_FORMAT_XBGR8888,
5503 	DRM_FORMAT_ABGR8888,
5504 	DRM_FORMAT_RGB565,
5505 };
5506 
5507 static const uint32_t overlay_formats[] = {
5508 	DRM_FORMAT_XRGB8888,
5509 	DRM_FORMAT_ARGB8888,
5510 	DRM_FORMAT_RGBA8888,
5511 	DRM_FORMAT_XBGR8888,
5512 	DRM_FORMAT_ABGR8888,
5513 	DRM_FORMAT_RGB565
5514 };
5515 
5516 static const u32 cursor_formats[] = {
5517 	DRM_FORMAT_ARGB8888
5518 };
5519 
5520 static int get_plane_formats(const struct drm_plane *plane,
5521 			     const struct dc_plane_cap *plane_cap,
5522 			     uint32_t *formats, int max_formats)
5523 {
5524 	int i, num_formats = 0;
5525 
5526 	/*
5527 	 * TODO: Query support for each group of formats directly from
5528 	 * DC plane caps. This will require adding more formats to the
5529 	 * caps list.
5530 	 */
5531 
5532 	switch (plane->type) {
5533 	case DRM_PLANE_TYPE_PRIMARY:
5534 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5535 			if (num_formats >= max_formats)
5536 				break;
5537 
5538 			formats[num_formats++] = rgb_formats[i];
5539 		}
5540 
5541 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5542 			formats[num_formats++] = DRM_FORMAT_NV12;
5543 		if (plane_cap && plane_cap->pixel_format_support.p010)
5544 			formats[num_formats++] = DRM_FORMAT_P010;
5545 		break;
5546 
5547 	case DRM_PLANE_TYPE_OVERLAY:
5548 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5549 			if (num_formats >= max_formats)
5550 				break;
5551 
5552 			formats[num_formats++] = overlay_formats[i];
5553 		}
5554 		break;
5555 
5556 	case DRM_PLANE_TYPE_CURSOR:
5557 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5558 			if (num_formats >= max_formats)
5559 				break;
5560 
5561 			formats[num_formats++] = cursor_formats[i];
5562 		}
5563 		break;
5564 	}
5565 
5566 	return num_formats;
5567 }
5568 
5569 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5570 				struct drm_plane *plane,
5571 				unsigned long possible_crtcs,
5572 				const struct dc_plane_cap *plane_cap)
5573 {
5574 	uint32_t formats[32];
5575 	int num_formats;
	int res;
5577 
5578 	num_formats = get_plane_formats(plane, plane_cap, formats,
5579 					ARRAY_SIZE(formats));
5580 
5581 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5582 				       &dm_plane_funcs, formats, num_formats,
5583 				       NULL, plane->type, NULL);
5584 	if (res)
5585 		return res;
5586 
5587 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5588 	    plane_cap && plane_cap->per_pixel_alpha) {
5589 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5590 					  BIT(DRM_MODE_BLEND_PREMULTI);
5591 
5592 		drm_plane_create_alpha_property(plane);
5593 		drm_plane_create_blend_mode_property(plane, blend_caps);
5594 	}
5595 
5596 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5597 	    plane_cap &&
5598 	    (plane_cap->pixel_format_support.nv12 ||
5599 	     plane_cap->pixel_format_support.p010)) {
5600 		/* This only affects YUV formats. */
5601 		drm_plane_create_color_properties(
5602 			plane,
5603 			BIT(DRM_COLOR_YCBCR_BT601) |
5604 			BIT(DRM_COLOR_YCBCR_BT709) |
5605 			BIT(DRM_COLOR_YCBCR_BT2020),
5606 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5607 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5608 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5609 	}
5610 
5611 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5612 
5613 	/* Create (reset) the plane state */
5614 	if (plane->funcs->reset)
5615 		plane->funcs->reset(plane);
5616 
5617 	return 0;
5618 }
5619 
5620 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5621 			       struct drm_plane *plane,
5622 			       uint32_t crtc_index)
5623 {
5624 	struct amdgpu_crtc *acrtc = NULL;
5625 	struct drm_plane *cursor_plane;
5626 
	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

	/* Reset res so a failed CRTC allocation reports -ENOMEM, not 0. */
	res = -ENOMEM;
	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;
5639 
5640 	res = drm_crtc_init_with_planes(
5641 			dm->ddev,
5642 			&acrtc->base,
5643 			plane,
5644 			cursor_plane,
5645 			&amdgpu_dm_crtc_funcs, NULL);
5646 
5647 	if (res)
5648 		goto fail;
5649 
5650 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5651 
	/* Create (reset) the CRTC state */
5653 	if (acrtc->base.funcs->reset)
5654 		acrtc->base.funcs->reset(&acrtc->base);
5655 
5656 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5657 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5658 
5659 	acrtc->crtc_id = crtc_index;
5660 	acrtc->base.enabled = false;
5661 	acrtc->otg_inst = -1;
5662 
5663 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5664 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5665 				   true, MAX_COLOR_LUT_ENTRIES);
5666 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5667 
5668 	return 0;
5669 
5670 fail:
5671 	kfree(acrtc);
5672 	kfree(cursor_plane);
5673 	return res;
5674 }
5675 
5676 
5677 static int to_drm_connector_type(enum signal_type st)
5678 {
5679 	switch (st) {
5680 	case SIGNAL_TYPE_HDMI_TYPE_A:
5681 		return DRM_MODE_CONNECTOR_HDMIA;
5682 	case SIGNAL_TYPE_EDP:
5683 		return DRM_MODE_CONNECTOR_eDP;
5684 	case SIGNAL_TYPE_LVDS:
5685 		return DRM_MODE_CONNECTOR_LVDS;
5686 	case SIGNAL_TYPE_RGB:
5687 		return DRM_MODE_CONNECTOR_VGA;
5688 	case SIGNAL_TYPE_DISPLAY_PORT:
5689 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5690 		return DRM_MODE_CONNECTOR_DisplayPort;
5691 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5692 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5693 		return DRM_MODE_CONNECTOR_DVID;
5694 	case SIGNAL_TYPE_VIRTUAL:
5695 		return DRM_MODE_CONNECTOR_VIRTUAL;
5696 
5697 	default:
5698 		return DRM_MODE_CONNECTOR_Unknown;
5699 	}
5700 }
5701 
5702 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5703 {
5704 	struct drm_encoder *encoder;
5705 
5706 	/* There is only one encoder per connector */
5707 	drm_connector_for_each_possible_encoder(connector, encoder)
5708 		return encoder;
5709 
5710 	return NULL;
5711 }
5712 
5713 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5714 {
5715 	struct drm_encoder *encoder;
5716 	struct amdgpu_encoder *amdgpu_encoder;
5717 
5718 	encoder = amdgpu_dm_connector_to_encoder(connector);
5719 
5720 	if (encoder == NULL)
5721 		return;
5722 
5723 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5724 
5725 	amdgpu_encoder->native_mode.clock = 0;
5726 
5727 	if (!list_empty(&connector->probed_modes)) {
5728 		struct drm_display_mode *preferred_mode = NULL;
5729 
5730 		list_for_each_entry(preferred_mode,
5731 				    &connector->probed_modes,
5732 				    head) {
5733 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5734 				amdgpu_encoder->native_mode = *preferred_mode;
5735 
5736 			break;
5737 		}
5738 
5739 	}
5740 }
5741 
5742 static struct drm_display_mode *
5743 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5744 			     char *name,
5745 			     int hdisplay, int vdisplay)
5746 {
5747 	struct drm_device *dev = encoder->dev;
5748 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5749 	struct drm_display_mode *mode = NULL;
5750 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5751 
5752 	mode = drm_mode_duplicate(dev, native_mode);
5753 
5754 	if (mode == NULL)
5755 		return NULL;
5756 
5757 	mode->hdisplay = hdisplay;
5758 	mode->vdisplay = vdisplay;
5759 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5760 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5761 
5762 	return mode;
5763 
5764 }
5765 
5766 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5767 						 struct drm_connector *connector)
5768 {
5769 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5770 	struct drm_display_mode *mode = NULL;
5771 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5772 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5773 				to_amdgpu_dm_connector(connector);
5774 	int i;
5775 	int n;
5776 	struct mode_size {
5777 		char name[DRM_DISPLAY_MODE_LEN];
5778 		int w;
5779 		int h;
5780 	} common_modes[] = {
5781 		{  "640x480",  640,  480},
5782 		{  "800x600",  800,  600},
5783 		{ "1024x768", 1024,  768},
5784 		{ "1280x720", 1280,  720},
5785 		{ "1280x800", 1280,  800},
5786 		{"1280x1024", 1280, 1024},
5787 		{ "1440x900", 1440,  900},
5788 		{"1680x1050", 1680, 1050},
5789 		{"1600x1200", 1600, 1200},
5790 		{"1920x1080", 1920, 1080},
5791 		{"1920x1200", 1920, 1200}
5792 	};
5793 
5794 	n = ARRAY_SIZE(common_modes);
5795 
5796 	for (i = 0; i < n; i++) {
5797 		struct drm_display_mode *curmode = NULL;
5798 		bool mode_existed = false;
5799 
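		/*
		 * Skip modes larger than the native mode, and skip the
		 * native mode itself; it is already in the probed list.
		 */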
5800 		if (common_modes[i].w > native_mode->hdisplay ||
5801 		    common_modes[i].h > native_mode->vdisplay ||
5802 		   (common_modes[i].w == native_mode->hdisplay &&
5803 		    common_modes[i].h == native_mode->vdisplay))
5804 			continue;
5805 
5806 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5807 			if (common_modes[i].w == curmode->hdisplay &&
5808 			    common_modes[i].h == curmode->vdisplay) {
5809 				mode_existed = true;
5810 				break;
5811 			}
5812 		}
5813 
5814 		if (mode_existed)
5815 			continue;
5816 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
5822 	}
5823 }
5824 
5825 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5826 					      struct edid *edid)
5827 {
5828 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5829 			to_amdgpu_dm_connector(connector);
5830 
5831 	if (edid) {
5832 		/* empty probed_modes */
5833 		INIT_LIST_HEAD(&connector->probed_modes);
5834 		amdgpu_dm_connector->num_modes =
5835 				drm_add_edid_modes(connector, edid);
5836 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed-mode
		 * list could be of a higher, preferred resolution: for
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 in a DisplayID extension block later.
		 */
5845 		drm_mode_sort(&connector->probed_modes);
5846 		amdgpu_dm_get_native_mode(connector);
5847 	} else {
5848 		amdgpu_dm_connector->num_modes = 0;
5849 	}
5850 }
5851 
5852 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5853 {
5854 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5855 			to_amdgpu_dm_connector(connector);
5856 	struct drm_encoder *encoder;
5857 	struct edid *edid = amdgpu_dm_connector->edid;
5858 
5859 	encoder = amdgpu_dm_connector_to_encoder(connector);
5860 
5861 	if (!edid || !drm_edid_is_valid(edid)) {
5862 		amdgpu_dm_connector->num_modes =
5863 				drm_add_modes_noedid(connector, 640, 480);
5864 	} else {
5865 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5866 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5867 	}
5868 	amdgpu_dm_fbc_init(connector);
5869 
5870 	return amdgpu_dm_connector->num_modes;
5871 }
5872 
5873 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5874 				     struct amdgpu_dm_connector *aconnector,
5875 				     int connector_type,
5876 				     struct dc_link *link,
5877 				     int link_index)
5878 {
5879 	struct amdgpu_device *adev = dm->ddev->dev_private;
5880 
5881 	/*
5882 	 * Some of the properties below require access to state, like bpc.
5883 	 * Allocate some default initial connector state with our reset helper.
5884 	 */
5885 	if (aconnector->base.funcs->reset)
5886 		aconnector->base.funcs->reset(&aconnector->base);
5887 
5888 	aconnector->connector_id = link_index;
5889 	aconnector->dc_link = link;
5890 	aconnector->base.interlace_allowed = false;
5891 	aconnector->base.doublescan_allowed = false;
5892 	aconnector->base.stereo_allowed = false;
5893 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5894 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5895 	aconnector->audio_inst = -1;
5896 	mutex_init(&aconnector->hpd_lock);
5897 
	/*
	 * Configure HPD (hot-plug detect) support. The default value of
	 * connector->polled is 0, which means hot plug is not supported.
	 */
5902 	switch (connector_type) {
5903 	case DRM_MODE_CONNECTOR_HDMIA:
5904 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
5907 		break;
5908 	case DRM_MODE_CONNECTOR_DisplayPort:
5909 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
5912 		break;
5913 	case DRM_MODE_CONNECTOR_DVID:
5914 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5915 		break;
5916 	default:
5917 		break;
5918 	}
5919 
5920 	drm_object_attach_property(&aconnector->base.base,
5921 				dm->ddev->mode_config.scaling_mode_property,
5922 				DRM_MODE_SCALE_NONE);
5923 
5924 	drm_object_attach_property(&aconnector->base.base,
5925 				adev->mode_info.underscan_property,
5926 				UNDERSCAN_OFF);
5927 	drm_object_attach_property(&aconnector->base.base,
5928 				adev->mode_info.underscan_hborder_property,
5929 				0);
5930 	drm_object_attach_property(&aconnector->base.base,
5931 				adev->mode_info.underscan_vborder_property,
5932 				0);
5933 
5934 	if (!aconnector->mst_port)
5935 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5936 
	/* This defaults to the max in the range, but we want 8 bpc for non-eDP. */
5938 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5939 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5940 
5941 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5942 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5943 		drm_object_attach_property(&aconnector->base.base,
5944 				adev->mode_info.abm_level_property, 0);
5945 	}
5946 
5947 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5948 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5949 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5950 		drm_object_attach_property(
5951 			&aconnector->base.base,
5952 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5953 
5954 		if (!aconnector->mst_port)
5955 			drm_connector_attach_vrr_capable_property(&aconnector->base);
5956 
5957 #ifdef CONFIG_DRM_AMD_DC_HDCP
5958 		if (adev->dm.hdcp_workqueue)
5959 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5960 #endif
5961 	}
5962 }
5963 
5964 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5965 			      struct i2c_msg *msgs, int num)
5966 {
5967 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5968 	struct ddc_service *ddc_service = i2c->ddc_service;
5969 	struct i2c_command cmd;
5970 	int i;
5971 	int result = -EIO;
5972 
5973 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5974 
5975 	if (!cmd.payloads)
5976 		return result;
5977 
5978 	cmd.number_of_payloads = num;
5979 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5980 	cmd.speed = 100;
5981 
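	/*
	 * Translate each i2c_msg into a DC i2c_payload; the I2C_M_RD flag
	 * selects the transfer direction.
	 */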
5982 	for (i = 0; i < num; i++) {
5983 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5984 		cmd.payloads[i].address = msgs[i].addr;
5985 		cmd.payloads[i].length = msgs[i].len;
5986 		cmd.payloads[i].data = msgs[i].buf;
5987 	}
5988 
5989 	if (dc_submit_i2c(
5990 			ddc_service->ctx->dc,
5991 			ddc_service->ddc_pin->hw_info.ddc_channel,
5992 			&cmd))
5993 		result = num;
5994 
5995 	kfree(cmd.payloads);
5996 	return result;
5997 }
5998 
5999 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6000 {
6001 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6002 }
6003 
6004 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6005 	.master_xfer = amdgpu_dm_i2c_xfer,
6006 	.functionality = amdgpu_dm_i2c_func,
6007 };
6008 
6009 static struct amdgpu_i2c_adapter *
6010 create_i2c(struct ddc_service *ddc_service,
6011 	   int link_index,
6012 	   int *res)
6013 {
6014 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6015 	struct amdgpu_i2c_adapter *i2c;
6016 
6017 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6018 	if (!i2c)
6019 		return NULL;
6020 	i2c->base.owner = THIS_MODULE;
6021 	i2c->base.class = I2C_CLASS_DDC;
6022 	i2c->base.dev.parent = &adev->pdev->dev;
6023 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6024 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6025 	i2c_set_adapdata(&i2c->base, i2c);
6026 	i2c->ddc_service = ddc_service;
6027 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6028 
6029 	return i2c;
6030 }
6031 
6032 
6033 /*
6034  * Note: this function assumes that dc_link_detect() was called for the
6035  * dc_link which will be represented by this aconnector.
6036  */
6037 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6038 				    struct amdgpu_dm_connector *aconnector,
6039 				    uint32_t link_index,
6040 				    struct amdgpu_encoder *aencoder)
6041 {
6042 	int res = 0;
6043 	int connector_type;
6044 	struct dc *dc = dm->dc;
6045 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6046 	struct amdgpu_i2c_adapter *i2c;
6047 
6048 	link->priv = aconnector;
6049 
6050 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6051 
6052 	i2c = create_i2c(link->ddc, link->link_index, &res);
6053 	if (!i2c) {
6054 		DRM_ERROR("Failed to create i2c adapter data\n");
6055 		return -ENOMEM;
6056 	}
6057 
6058 	aconnector->i2c = i2c;
6059 	res = i2c_add_adapter(&i2c->base);
6060 
6061 	if (res) {
6062 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6063 		goto out_free;
6064 	}
6065 
6066 	connector_type = to_drm_connector_type(link->connector_signal);
6067 
6068 	res = drm_connector_init_with_ddc(
6069 			dm->ddev,
6070 			&aconnector->base,
6071 			&amdgpu_dm_connector_funcs,
6072 			connector_type,
6073 			&i2c->base);
6074 
6075 	if (res) {
6076 		DRM_ERROR("connector_init failed\n");
6077 		aconnector->connector_id = -1;
6078 		goto out_free;
6079 	}
6080 
6081 	drm_connector_helper_add(
6082 			&aconnector->base,
6083 			&amdgpu_dm_connector_helper_funcs);
6084 
6085 	amdgpu_dm_connector_init_helper(
6086 		dm,
6087 		aconnector,
6088 		connector_type,
6089 		link,
6090 		link_index);
6091 
6092 	drm_connector_attach_encoder(
6093 		&aconnector->base, &aencoder->base);
6094 
6095 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6096 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6097 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6098 
6099 out_free:
6100 	if (res) {
6101 		kfree(i2c);
6102 		aconnector->i2c = NULL;
6103 	}
6104 	return res;
6105 }
6106 
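/*
 * Return a bitmask with one bit set for each possible CRTC, e.g. a device
 * with 4 CRTCs yields 0xf.
 */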
6107 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6108 {
6109 	switch (adev->mode_info.num_crtc) {
6110 	case 1:
6111 		return 0x1;
6112 	case 2:
6113 		return 0x3;
6114 	case 3:
6115 		return 0x7;
6116 	case 4:
6117 		return 0xf;
6118 	case 5:
6119 		return 0x1f;
6120 	case 6:
6121 	default:
6122 		return 0x3f;
6123 	}
6124 }
6125 
6126 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6127 				  struct amdgpu_encoder *aencoder,
6128 				  uint32_t link_index)
6129 {
6130 	struct amdgpu_device *adev = dev->dev_private;
6131 
6132 	int res = drm_encoder_init(dev,
6133 				   &aencoder->base,
6134 				   &amdgpu_dm_encoder_funcs,
6135 				   DRM_MODE_ENCODER_TMDS,
6136 				   NULL);
6137 
6138 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6139 
6140 	if (!res)
6141 		aencoder->encoder_id = link_index;
6142 	else
6143 		aencoder->encoder_id = -1;
6144 
6145 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6146 
6147 	return res;
6148 }
6149 
6150 static void manage_dm_interrupts(struct amdgpu_device *adev,
6151 				 struct amdgpu_crtc *acrtc,
6152 				 bool enable)
6153 {
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as the PFLIP one.
	 */
6158 	int irq_type =
6159 		amdgpu_display_crtc_idx_to_irq_type(
6160 			adev,
6161 			acrtc->crtc_id);
6162 
6163 	if (enable) {
6164 		drm_crtc_vblank_on(&acrtc->base);
6165 		amdgpu_irq_get(
6166 			adev,
6167 			&adev->pageflip_irq,
6168 			irq_type);
6169 	} else {
6170 
6171 		amdgpu_irq_put(
6172 			adev,
6173 			&adev->pageflip_irq,
6174 			irq_type);
6175 		drm_crtc_vblank_off(&acrtc->base);
6176 	}
6177 }
6178 
6179 static bool
6180 is_scaling_state_different(const struct dm_connector_state *dm_state,
6181 			   const struct dm_connector_state *old_dm_state)
6182 {
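	/*
	 * A change is reported when the scaling mode differs, when underscan
	 * is toggled while non-zero borders are set, or when the border
	 * sizes themselves change.
	 */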
6183 	if (dm_state->scaling != old_dm_state->scaling)
6184 		return true;
6185 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6186 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6187 			return true;
6188 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6189 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6190 			return true;
6191 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6192 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6193 		return true;
6194 	return false;
6195 }
6196 
6197 #ifdef CONFIG_DRM_AMD_DC_HDCP
6198 static bool is_content_protection_different(struct drm_connector_state *state,
6199 					    const struct drm_connector_state *old_state,
6200 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6201 {
6202 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6203 
6204 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6205 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6206 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6207 		return true;
6208 	}
6209 
	/* CP is being re-enabled; ignore this. */
6211 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6212 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6213 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6214 		return false;
6215 	}
6216 
	/* S3 resume case: the old state is always 0 (UNDESIRED) while the restored state is ENABLED. */
6218 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6219 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6220 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6221 
	/*
	 * Check if something is connected and enabled; otherwise we would
	 * start HDCP with nothing actually connected/enabled (hot plug,
	 * headless S3, DPMS).
	 */
6225 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6226 	    aconnector->dc_sink != NULL)
6227 		return true;
6228 
6229 	if (old_state->content_protection == state->content_protection)
6230 		return false;
6231 
6232 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6233 		return true;
6234 
6235 	return false;
6236 }
6237 
6238 #endif
6239 static void remove_stream(struct amdgpu_device *adev,
6240 			  struct amdgpu_crtc *acrtc,
6241 			  struct dc_stream_state *stream)
6242 {
6243 	/* this is the update mode case */
6244 
6245 	acrtc->otg_inst = -1;
6246 	acrtc->enabled = false;
6247 }
6248 
6249 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6250 			       struct dc_cursor_position *position)
6251 {
6252 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6253 	int x, y;
6254 	int xorigin = 0, yorigin = 0;
6255 
6256 	position->enable = false;
6257 	position->x = 0;
6258 	position->y = 0;
6259 
6260 	if (!crtc || !plane->state->fb)
6261 		return 0;
6262 
6263 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6264 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6265 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6266 			  __func__,
6267 			  plane->state->crtc_w,
6268 			  plane->state->crtc_h);
6269 		return -EINVAL;
6270 	}
6271 
6272 	x = plane->state->crtc_x;
6273 	y = plane->state->crtc_y;
6274 
6275 	if (x <= -amdgpu_crtc->max_cursor_width ||
6276 	    y <= -amdgpu_crtc->max_cursor_height)
6277 		return 0;
6278 
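	/*
	 * If the cursor hangs off the top/left edge, clamp the position to 0
	 * and shift the hotspot instead, so the visible part stays aligned.
	 */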
6279 	if (x < 0) {
6280 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6281 		x = 0;
6282 	}
6283 	if (y < 0) {
6284 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6285 		y = 0;
6286 	}
6287 	position->enable = true;
6288 	position->translate_by_source = true;
6289 	position->x = x;
6290 	position->y = y;
6291 	position->x_hotspot = xorigin;
6292 	position->y_hotspot = yorigin;
6293 
6294 	return 0;
6295 }
6296 
6297 static void handle_cursor_update(struct drm_plane *plane,
6298 				 struct drm_plane_state *old_plane_state)
6299 {
6300 	struct amdgpu_device *adev = plane->dev->dev_private;
6301 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6302 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6303 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6304 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6305 	uint64_t address = afb ? afb->address : 0;
6306 	struct dc_cursor_position position;
6307 	struct dc_cursor_attributes attributes;
6308 	int ret;
6309 
6310 	if (!plane->state->fb && !old_plane_state->fb)
6311 		return;
6312 
	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
6314 			 __func__,
6315 			 amdgpu_crtc->crtc_id,
6316 			 plane->state->crtc_w,
6317 			 plane->state->crtc_h);
6318 
6319 	ret = get_cursor_position(plane, crtc, &position);
6320 	if (ret)
6321 		return;
6322 
6323 	if (!position.enable) {
6324 		/* turn off cursor */
6325 		if (crtc_state && crtc_state->stream) {
6326 			mutex_lock(&adev->dm.dc_lock);
6327 			dc_stream_set_cursor_position(crtc_state->stream,
6328 						      &position);
6329 			mutex_unlock(&adev->dm.dc_lock);
6330 		}
6331 		return;
6332 	}
6333 
6334 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6335 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6336 
6337 	memset(&attributes, 0, sizeof(attributes));
6338 	attributes.address.high_part = upper_32_bits(address);
6339 	attributes.address.low_part  = lower_32_bits(address);
6340 	attributes.width             = plane->state->crtc_w;
6341 	attributes.height            = plane->state->crtc_h;
6342 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6343 	attributes.rotation_angle    = 0;
6344 	attributes.attribute_flags.value = 0;
6345 
6346 	attributes.pitch = attributes.width;
6347 
6348 	if (crtc_state->stream) {
6349 		mutex_lock(&adev->dm.dc_lock);
6350 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6351 							 &attributes))
6352 			DRM_ERROR("DC failed to set cursor attributes\n");
6353 
6354 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6355 						   &position))
6356 			DRM_ERROR("DC failed to set cursor position\n");
6357 		mutex_unlock(&adev->dm.dc_lock);
6358 	}
6359 }
6360 
6361 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6362 {
6363 
6364 	assert_spin_locked(&acrtc->base.dev->event_lock);
6365 	WARN_ON(acrtc->event);
6366 
6367 	acrtc->event = acrtc->base.state->event;
6368 
6369 	/* Set the flip status */
6370 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6371 
6372 	/* Mark this event as consumed */
6373 	acrtc->base.state->event = NULL;
6374 
6375 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6376 						 acrtc->crtc_id);
6377 }
6378 
6379 static void update_freesync_state_on_stream(
6380 	struct amdgpu_display_manager *dm,
6381 	struct dm_crtc_state *new_crtc_state,
6382 	struct dc_stream_state *new_stream,
6383 	struct dc_plane_state *surface,
6384 	u32 flip_timestamp_in_us)
6385 {
6386 	struct mod_vrr_params vrr_params;
6387 	struct dc_info_packet vrr_infopacket = {0};
6388 	struct amdgpu_device *adev = dm->adev;
6389 	unsigned long flags;
6390 
6391 	if (!new_stream)
6392 		return;
6393 
6394 	/*
6395 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6396 	 * For now it's sufficient to just guard against these conditions.
6397 	 */
6398 
6399 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6400 		return;
6401 
6402 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6403 	vrr_params = new_crtc_state->vrr_params;
6404 
6405 	if (surface) {
6406 		mod_freesync_handle_preflip(
6407 			dm->freesync_module,
6408 			surface,
6409 			new_stream,
6410 			flip_timestamp_in_us,
6411 			&vrr_params);
6412 
6413 		if (adev->family < AMDGPU_FAMILY_AI &&
6414 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6415 			mod_freesync_handle_v_update(dm->freesync_module,
6416 						     new_stream, &vrr_params);
6417 
6418 			/* Need to call this before the frame ends. */
6419 			dc_stream_adjust_vmin_vmax(dm->dc,
6420 						   new_crtc_state->stream,
6421 						   &vrr_params.adjust);
6422 		}
6423 	}
6424 
6425 	mod_freesync_build_vrr_infopacket(
6426 		dm->freesync_module,
6427 		new_stream,
6428 		&vrr_params,
6429 		PACKET_TYPE_VRR,
6430 		TRANSFER_FUNC_UNKNOWN,
6431 		&vrr_infopacket);
6432 
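	/*
	 * Record whether the timing adjust or the VRR infopacket actually
	 * changed, so the commit path only sends stream updates when needed.
	 */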
6433 	new_crtc_state->freesync_timing_changed |=
6434 		(memcmp(&new_crtc_state->vrr_params.adjust,
6435 			&vrr_params.adjust,
6436 			sizeof(vrr_params.adjust)) != 0);
6437 
6438 	new_crtc_state->freesync_vrr_info_changed |=
6439 		(memcmp(&new_crtc_state->vrr_infopacket,
6440 			&vrr_infopacket,
6441 			sizeof(vrr_infopacket)) != 0);
6442 
6443 	new_crtc_state->vrr_params = vrr_params;
6444 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6445 
6446 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6447 	new_stream->vrr_infopacket = vrr_infopacket;
6448 
6449 	if (new_crtc_state->freesync_vrr_info_changed)
6450 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6451 			      new_crtc_state->base.crtc->base.id,
6452 			      (int)new_crtc_state->base.vrr_enabled,
6453 			      (int)vrr_params.state);
6454 
6455 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6456 }
6457 
6458 static void pre_update_freesync_state_on_stream(
6459 	struct amdgpu_display_manager *dm,
6460 	struct dm_crtc_state *new_crtc_state)
6461 {
6462 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6463 	struct mod_vrr_params vrr_params;
6464 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6465 	struct amdgpu_device *adev = dm->adev;
6466 	unsigned long flags;
6467 
6468 	if (!new_stream)
6469 		return;
6470 
6471 	/*
6472 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6473 	 * For now it's sufficient to just guard against these conditions.
6474 	 */
6475 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6476 		return;
6477 
6478 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6479 	vrr_params = new_crtc_state->vrr_params;
6480 
6481 	if (new_crtc_state->vrr_supported &&
6482 	    config.min_refresh_in_uhz &&
6483 	    config.max_refresh_in_uhz) {
6484 		config.state = new_crtc_state->base.vrr_enabled ?
6485 			VRR_STATE_ACTIVE_VARIABLE :
6486 			VRR_STATE_INACTIVE;
6487 	} else {
6488 		config.state = VRR_STATE_UNSUPPORTED;
6489 	}
6490 
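	/*
	 * Recompute the VRR parameters (including the vmin/vmax adjust
	 * range) from the updated config.
	 */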
6491 	mod_freesync_build_vrr_params(dm->freesync_module,
6492 				      new_stream,
6493 				      &config, &vrr_params);
6494 
6495 	new_crtc_state->freesync_timing_changed |=
6496 		(memcmp(&new_crtc_state->vrr_params.adjust,
6497 			&vrr_params.adjust,
6498 			sizeof(vrr_params.adjust)) != 0);
6499 
6500 	new_crtc_state->vrr_params = vrr_params;
6501 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6502 }
6503 
6504 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6505 					    struct dm_crtc_state *new_state)
6506 {
6507 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6508 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6509 
6510 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a re-enable after a disable would compute bogus vblank/pflip
		 * timestamps if it happened inside the display front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
6519 		dm_set_vupdate_irq(new_state->base.crtc, true);
6520 		drm_crtc_vblank_get(new_state->base.crtc);
6521 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6522 				 __func__, new_state->base.crtc->base.id);
6523 	} else if (old_vrr_active && !new_vrr_active) {
6524 		/* Transition VRR active -> inactive:
6525 		 * Allow vblank irq disable again for fixed refresh rate.
6526 		 */
6527 		dm_set_vupdate_irq(new_state->base.crtc, false);
6528 		drm_crtc_vblank_put(new_state->base.crtc);
6529 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6530 				 __func__, new_state->base.crtc->base.id);
6531 	}
6532 }
6533 
6534 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6535 {
6536 	struct drm_plane *plane;
6537 	struct drm_plane_state *old_plane_state, *new_plane_state;
6538 	int i;
6539 
6540 	/*
6541 	 * TODO: Make this per-stream so we don't issue redundant updates for
6542 	 * commits with multiple streams.
6543 	 */
6544 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6545 				       new_plane_state, i)
6546 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6547 			handle_cursor_update(plane, old_plane_state);
6548 }
6549 
6550 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6551 				    struct dc_state *dc_state,
6552 				    struct drm_device *dev,
6553 				    struct amdgpu_display_manager *dm,
6554 				    struct drm_crtc *pcrtc,
6555 				    bool wait_for_vblank)
6556 {
6557 	uint32_t i;
6558 	uint64_t timestamp_ns;
6559 	struct drm_plane *plane;
6560 	struct drm_plane_state *old_plane_state, *new_plane_state;
6561 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6562 	struct drm_crtc_state *new_pcrtc_state =
6563 			drm_atomic_get_new_crtc_state(state, pcrtc);
6564 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6565 	struct dm_crtc_state *dm_old_crtc_state =
6566 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6567 	int planes_count = 0, vpos, hpos;
6568 	long r;
6569 	unsigned long flags;
6570 	struct amdgpu_bo *abo;
6571 	uint64_t tiling_flags;
6572 	uint32_t target_vblank, last_flip_vblank;
6573 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6574 	bool pflip_present = false;
6575 	struct {
6576 		struct dc_surface_update surface_updates[MAX_SURFACES];
6577 		struct dc_plane_info plane_infos[MAX_SURFACES];
6578 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6579 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6580 		struct dc_stream_update stream_update;
6581 	} *bundle;
6582 
6583 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6584 
6585 	if (!bundle) {
6586 		dm_error("Failed to allocate update bundle\n");
6587 		goto cleanup;
6588 	}
6589 
6590 	/*
6591 	 * Disable the cursor first if we're disabling all the planes.
6592 	 * It'll remain on the screen after the planes are re-enabled
6593 	 * if we don't.
6594 	 */
6595 	if (acrtc_state->active_planes == 0)
6596 		amdgpu_dm_commit_cursors(state);
6597 
6598 	/* update planes when needed */
6599 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6600 		struct drm_crtc *crtc = new_plane_state->crtc;
6601 		struct drm_crtc_state *new_crtc_state;
6602 		struct drm_framebuffer *fb = new_plane_state->fb;
6603 		bool plane_needs_flip;
6604 		struct dc_plane_state *dc_plane;
6605 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6606 
6607 		/* Cursor plane is handled after stream updates */
6608 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6609 			continue;
6610 
6611 		if (!fb || !crtc || pcrtc != crtc)
6612 			continue;
6613 
6614 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6615 		if (!new_crtc_state->active)
6616 			continue;
6617 
6618 		dc_plane = dm_new_plane_state->dc_state;
6619 
6620 		bundle->surface_updates[planes_count].surface = dc_plane;
6621 		if (new_pcrtc_state->color_mgmt_changed) {
6622 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6623 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6624 		}
6625 
6626 		fill_dc_scaling_info(new_plane_state,
6627 				     &bundle->scaling_infos[planes_count]);
6628 
6629 		bundle->surface_updates[planes_count].scaling_info =
6630 			&bundle->scaling_infos[planes_count];
6631 
6632 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6633 
6634 		pflip_present = pflip_present || plane_needs_flip;
6635 
6636 		if (!plane_needs_flip) {
6637 			planes_count += 1;
6638 			continue;
6639 		}
6640 
6641 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6642 
		/*
		 * Wait for all fences on this FB. Use a limited wait to avoid
		 * a deadlock during GPU reset, when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
6648 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6649 							false,
6650 							msecs_to_jiffies(5000));
6651 		if (unlikely(r <= 0))
6652 			DRM_ERROR("Waiting for fences timed out!");
6653 
		/*
		 * TODO: This might fail, hence it is better not to use it;
		 * wait explicitly on the fences instead. In general this
		 * should only be called for a blocking commit, as per the
		 * framework helpers.
		 */
6660 		r = amdgpu_bo_reserve(abo, true);
6661 		if (unlikely(r != 0))
6662 			DRM_ERROR("failed to reserve buffer before flip\n");
6663 
6664 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6665 
6666 		amdgpu_bo_unreserve(abo);
6667 
6668 		fill_dc_plane_info_and_addr(
6669 			dm->adev, new_plane_state, tiling_flags,
6670 			&bundle->plane_infos[planes_count],
6671 			&bundle->flip_addrs[planes_count].address,
6672 			false);
6673 
6674 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6675 				 new_plane_state->plane->index,
6676 				 bundle->plane_infos[planes_count].dcc.enable);
6677 
6678 		bundle->surface_updates[planes_count].plane_info =
6679 			&bundle->plane_infos[planes_count];
6680 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change the FB pitch, the DCC state, rotation or mirroring.
		 */
6685 		bundle->flip_addrs[planes_count].flip_immediate =
6686 			crtc->state->async_flip &&
6687 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6688 
6689 		timestamp_ns = ktime_get_ns();
6690 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6691 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6693 
6694 		if (!bundle->surface_updates[planes_count].surface) {
6695 			DRM_ERROR("No surface for CRTC: id=%d\n",
6696 					acrtc_attach->crtc_id);
6697 			continue;
6698 		}
6699 
6700 		if (plane == pcrtc->primary)
6701 			update_freesync_state_on_stream(
6702 				dm,
6703 				acrtc_state,
6704 				acrtc_state->stream,
6705 				dc_plane,
6706 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6707 
6708 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6709 				 __func__,
6710 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6711 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6712 
6713 		planes_count += 1;
6714 
6715 	}
6716 
6717 	if (pflip_present) {
6718 		if (!vrr_active) {
6719 			/* Use old throttling in non-vrr fixed refresh rate mode
6720 			 * to keep flip scheduling based on target vblank counts
6721 			 * working in a backwards compatible way, e.g., for
6722 			 * clients using the GLX_OML_sync_control extension or
6723 			 * DRI3/Present extension with defined target_msc.
6724 			 */
6725 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
6728 			/* For variable refresh rate mode only:
6729 			 * Get vblank of last completed flip to avoid > 1 vrr
6730 			 * flips per video frame by use of throttling, but allow
6731 			 * flip programming anywhere in the possibly large
6732 			 * variable vrr vblank interval for fine-grained flip
6733 			 * timing control and more opportunity to avoid stutter
6734 			 * on late submission of flips.
6735 			 */
6736 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6737 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6738 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6739 		}
6740 
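		/*
		 * wait_for_vblank is effectively a boolean here: target
		 * either the vblank of the last flip or the one after it.
		 */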
6741 		target_vblank = last_flip_vblank + wait_for_vblank;
6742 
6743 		/*
6744 		 * Wait until we're out of the vertical blank period before the one
6745 		 * targeted by the flip
6746 		 */
6747 		while ((acrtc_attach->enabled &&
6748 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6749 							    0, &vpos, &hpos, NULL,
6750 							    NULL, &pcrtc->hwmode)
6751 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6752 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6753 			(int)(target_vblank -
6754 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6755 			usleep_range(1000, 1100);
6756 		}
6757 
6758 		if (acrtc_attach->base.state->event) {
6759 			drm_crtc_vblank_get(pcrtc);
6760 
6761 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6762 
6763 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6764 			prepare_flip_isr(acrtc_attach);
6765 
6766 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6767 		}
6768 
6769 		if (acrtc_state->stream) {
6770 			if (acrtc_state->freesync_vrr_info_changed)
6771 				bundle->stream_update.vrr_infopacket =
6772 					&acrtc_state->stream->vrr_infopacket;
6773 		}
6774 	}
6775 
6776 	/* Update the planes if changed or disable if we don't have any. */
6777 	if ((planes_count || acrtc_state->active_planes == 0) &&
6778 		acrtc_state->stream) {
6779 		bundle->stream_update.stream = acrtc_state->stream;
6780 		if (new_pcrtc_state->mode_changed) {
6781 			bundle->stream_update.src = acrtc_state->stream->src;
6782 			bundle->stream_update.dst = acrtc_state->stream->dst;
6783 		}
6784 
6785 		if (new_pcrtc_state->color_mgmt_changed) {
6786 			/*
6787 			 * TODO: This isn't fully correct since we've actually
6788 			 * already modified the stream in place.
6789 			 */
6790 			bundle->stream_update.gamut_remap =
6791 				&acrtc_state->stream->gamut_remap_matrix;
6792 			bundle->stream_update.output_csc_transform =
6793 				&acrtc_state->stream->csc_color_matrix;
6794 			bundle->stream_update.out_transfer_func =
6795 				acrtc_state->stream->out_transfer_func;
6796 		}
6797 
6798 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6799 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6800 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6801 
6802 		/*
6803 		 * If FreeSync state on the stream has changed then we need to
6804 		 * re-adjust the min/max bounds now that DC doesn't handle this
6805 		 * as part of commit.
6806 		 */
6807 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6808 		    amdgpu_dm_vrr_active(acrtc_state)) {
6809 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6810 			dc_stream_adjust_vmin_vmax(
6811 				dm->dc, acrtc_state->stream,
6812 				&acrtc_state->vrr_params.adjust);
6813 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6814 		}
6815 		mutex_lock(&dm->dc_lock);
6816 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6817 				acrtc_state->stream->link->psr_allow_active)
6818 			amdgpu_dm_psr_disable(acrtc_state->stream);
6819 
		dc_commit_updates_for_stream(dm->dc,
					     bundle->surface_updates,
					     planes_count,
					     acrtc_state->stream,
					     &bundle->stream_update,
					     dc_state);

		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
		    acrtc_state->stream->psr_version &&
		    !acrtc_state->stream->link->psr_feature_enabled) {
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		} else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
			   acrtc_state->stream->link->psr_feature_enabled &&
			   !acrtc_state->stream->link->psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}
6836 
6837 		mutex_unlock(&dm->dc_lock);
6838 	}
6839 
6840 	/*
6841 	 * Update cursor state *after* programming all the planes.
6842 	 * This avoids redundant programming in the case where we're going
6843 	 * to be disabling a single plane - those pipes are being disabled.
6844 	 */
6845 	if (acrtc_state->active_planes)
6846 		amdgpu_dm_commit_cursors(state);
6847 
6848 cleanup:
6849 	kfree(bundle);
6850 }
6851 
6852 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6853 				   struct drm_atomic_state *state)
6854 {
6855 	struct amdgpu_device *adev = dev->dev_private;
6856 	struct amdgpu_dm_connector *aconnector;
6857 	struct drm_connector *connector;
6858 	struct drm_connector_state *old_con_state, *new_con_state;
6859 	struct drm_crtc_state *new_crtc_state;
6860 	struct dm_crtc_state *new_dm_crtc_state;
6861 	const struct dc_stream_status *status;
6862 	int i, inst;
6863 
6864 	/* Notify device removals. */
6865 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6866 		if (old_con_state->crtc != new_con_state->crtc) {
6867 			/* CRTC changes require notification. */
6868 			goto notify;
6869 		}
6870 
6871 		if (!new_con_state->crtc)
6872 			continue;
6873 
6874 		new_crtc_state = drm_atomic_get_new_crtc_state(
6875 			state, new_con_state->crtc);
6876 
6877 		if (!new_crtc_state)
6878 			continue;
6879 
6880 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6881 			continue;
6882 
6883 	notify:
6884 		aconnector = to_amdgpu_dm_connector(connector);
6885 
6886 		mutex_lock(&adev->dm.audio_lock);
6887 		inst = aconnector->audio_inst;
6888 		aconnector->audio_inst = -1;
6889 		mutex_unlock(&adev->dm.audio_lock);
6890 
6891 		amdgpu_dm_audio_eld_notify(adev, inst);
6892 	}
6893 
6894 	/* Notify audio device additions. */
6895 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6896 		if (!new_con_state->crtc)
6897 			continue;
6898 
6899 		new_crtc_state = drm_atomic_get_new_crtc_state(
6900 			state, new_con_state->crtc);
6901 
6902 		if (!new_crtc_state)
6903 			continue;
6904 
6905 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6906 			continue;
6907 
6908 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6909 		if (!new_dm_crtc_state->stream)
6910 			continue;
6911 
6912 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6913 		if (!status)
6914 			continue;
6915 
6916 		aconnector = to_amdgpu_dm_connector(connector);
6917 
6918 		mutex_lock(&adev->dm.audio_lock);
6919 		inst = status->audio_inst;
6920 		aconnector->audio_inst = inst;
6921 		mutex_unlock(&adev->dm.audio_lock);
6922 
6923 		amdgpu_dm_audio_eld_notify(adev, inst);
6924 	}
6925 }
6926 
6927 /*
6928  * Enable interrupts on CRTCs that are newly active, have undergone
6929  * a modeset, or have active planes again.
6930  *
6931  * Done in two passes, based on the for_modeset flag:
6932  * Pass 1: For CRTCs going through modeset
6933  * Pass 2: For CRTCs going from 0 to n active planes
6934  *
6935  * Interrupts can only be enabled after the planes are programmed,
6936  * so this requires a two-pass approach since we don't want to
6937  * just defer the interrupts until after commit planes every time.
6938  */
6939 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6940 					     struct drm_atomic_state *state,
6941 					     bool for_modeset)
6942 {
6943 	struct amdgpu_device *adev = dev->dev_private;
6944 	struct drm_crtc *crtc;
6945 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6946 	int i;
6947 #ifdef CONFIG_DEBUG_FS
6948 	enum amdgpu_dm_pipe_crc_source source;
6949 #endif
6950 
6951 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6952 				      new_crtc_state, i) {
6953 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6954 		struct dm_crtc_state *dm_new_crtc_state =
6955 			to_dm_crtc_state(new_crtc_state);
6956 		struct dm_crtc_state *dm_old_crtc_state =
6957 			to_dm_crtc_state(old_crtc_state);
6958 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6959 		bool run_pass;
6960 
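		/*
		 * Pass 1 (for_modeset) covers CRTCs undergoing a modeset;
		 * pass 2 covers CRTCs going from 0 to n active planes whose
		 * interrupts were previously disabled.
		 */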
6961 		run_pass = (for_modeset && modeset) ||
6962 			   (!for_modeset && !modeset &&
6963 			    !dm_old_crtc_state->interrupts_enabled);
6964 
6965 		if (!run_pass)
6966 			continue;
6967 
6968 		if (!dm_new_crtc_state->interrupts_enabled)
6969 			continue;
6970 
6971 		manage_dm_interrupts(adev, acrtc, true);
6972 
6973 #ifdef CONFIG_DEBUG_FS
6974 		/* The stream has changed, so CRC capture needs to be re-enabled. */
6975 		source = dm_new_crtc_state->crc_src;
6976 		if (amdgpu_dm_is_valid_crc_source(source)) {
6977 			amdgpu_dm_crtc_configure_crc_source(
6978 				crtc, dm_new_crtc_state,
6979 				dm_new_crtc_state->crc_src);
6980 		}
6981 #endif
6982 	}
6983 }
6984 
6985 /*
6986  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6987  * @crtc_state: the DRM CRTC state
6988  * @stream_state: the DC stream state.
6989  *
6990  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6991  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6992  */
6993 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6994 						struct dc_stream_state *stream_state)
6995 {
6996 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6997 }
6998 
6999 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7000 				   struct drm_atomic_state *state,
7001 				   bool nonblock)
7002 {
7003 	struct drm_crtc *crtc;
7004 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7005 	struct amdgpu_device *adev = dev->dev_private;
7006 	int i;
7007 
7008 	/*
7009 	 * We disable vblank and pflip interrupts on CRTCs that are undergoing
7010 	 * a modeset, being disabled, or have no active planes.
7011 	 *
7012 	 * It's done in atomic commit rather than commit tail for now since
7013 	 * some of these interrupt handlers access the current CRTC state and
7014 	 * potentially the stream pointer itself.
7015 	 *
7016 	 * Since the atomic state is swapped within atomic commit and not within
7017 	 * commit tail, this would lead to the new state (which hasn't been
7018 	 * committed yet) being accessed from within the handlers.
7019 	 *
7020 	 * TODO: Fix this so we can do this in commit tail and not have to block
7021 	 * in atomic check.
7022 	 */
7023 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7024 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7025 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7026 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7027 
7028 		if (dm_old_crtc_state->interrupts_enabled &&
7029 		    (!dm_new_crtc_state->interrupts_enabled ||
7030 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7031 			manage_dm_interrupts(adev, acrtc, false);
7032 	}
7033 	/*
7034 	 * Add a check here for SoCs that support a hardware cursor plane, to
7035 	 * unset legacy_cursor_update.
7036 	 */
7037 
7038 	return drm_atomic_helper_commit(dev, state, nonblock);
7039 
7040 	/* TODO: Handle EINTR, re-enable IRQ */
7041 }
7042 
7043 /**
7044  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7045  * @state: The atomic state to commit
7046  *
7047  * This will tell DC to commit the constructed DC state from atomic_check,
7048  * programming the hardware. Any failure here implies a hardware failure, since
7049  * atomic check should have filtered anything non-kosher.
7050  */
7051 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7052 {
7053 	struct drm_device *dev = state->dev;
7054 	struct amdgpu_device *adev = dev->dev_private;
7055 	struct amdgpu_display_manager *dm = &adev->dm;
7056 	struct dm_atomic_state *dm_state;
7057 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7058 	uint32_t i, j;
7059 	struct drm_crtc *crtc;
7060 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7061 	unsigned long flags;
7062 	bool wait_for_vblank = true;
7063 	struct drm_connector *connector;
7064 	struct drm_connector_state *old_con_state, *new_con_state;
7065 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7066 	int crtc_disable_count = 0;
7067 
7068 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7069 
7070 	dm_state = dm_atomic_get_new_state(state);
7071 	if (dm_state && dm_state->context) {
7072 		dc_state = dm_state->context;
7073 	} else {
7074 		/* No state changes, retain current state. */
7075 		dc_state_temp = dc_create_state(dm->dc);
7076 		ASSERT(dc_state_temp);
7077 		dc_state = dc_state_temp;
7078 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7079 	}
7080 
7081 	/* update changed items */
7082 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7083 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7084 
7085 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7086 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7087 
7088 		DRM_DEBUG_DRIVER(
7089 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7090 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7091 			"connectors_changed:%d\n",
7092 			acrtc->crtc_id,
7093 			new_crtc_state->enable,
7094 			new_crtc_state->active,
7095 			new_crtc_state->planes_changed,
7096 			new_crtc_state->mode_changed,
7097 			new_crtc_state->active_changed,
7098 			new_crtc_state->connectors_changed);
7099 
7100 		/* Copy all transient state flags into dc state */
7101 		if (dm_new_crtc_state->stream) {
7102 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7103 							    dm_new_crtc_state->stream);
7104 		}
7105 
7106 		/* Handle the headless hotplug case, updating new_state and
7107 		 * aconnector as needed.
7108 		 */
7109 
7110 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7111 
7112 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7113 
7114 			if (!dm_new_crtc_state->stream) {
7115 				/*
7116 				 * This can happen because of issues with the
7117 				 * delivery of userspace notifications: userspace
7118 				 * tries to set a mode on a display which is in
7119 				 * fact disconnected, so dc_sink is NULL on the
7120 				 * aconnector. We expect a mode reset to come
7121 				 * soon.
7122 				 *
7123 				 * This can also happen when an unplug occurs
7124 				 * during the resume sequence.
7125 				 *
7126 				 * In either case, we want to pretend we still
7127 				 * have a sink to keep the pipe running, so that
7128 				 * the hw state stays consistent with the sw state.
7129 				 */
7130 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7131 						__func__, acrtc->base.base.id);
7132 				continue;
7133 			}
7134 
7135 			if (dm_old_crtc_state->stream)
7136 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7137 
7138 			pm_runtime_get_noresume(dev->dev);
7139 
7140 			acrtc->enabled = true;
7141 			acrtc->hw_mode = new_crtc_state->mode;
7142 			crtc->hwmode = new_crtc_state->mode;
7143 		} else if (modereset_required(new_crtc_state)) {
7144 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7145 			/* i.e. reset mode */
7146 			if (dm_old_crtc_state->stream) {
7147 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7148 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7149 
7150 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7151 			}
7152 		}
7153 	} /* for_each_crtc_in_state() */
7154 
7155 	if (dc_state) {
7156 		dm_enable_per_frame_crtc_master_sync(dc_state);
7157 		mutex_lock(&dm->dc_lock);
7158 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7159 		mutex_unlock(&dm->dc_lock);
7160 	}
7161 
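	/* Record which OTG instance backs each CRTC now that DC has committed. */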
7162 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7163 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7164 
7165 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7166 
7167 		if (dm_new_crtc_state->stream != NULL) {
7168 			const struct dc_stream_status *status =
7169 					dc_stream_get_status(dm_new_crtc_state->stream);
7170 
7171 			if (!status)
7172 				status = dc_stream_get_status_from_state(dc_state,
7173 									 dm_new_crtc_state->stream);
7174 
7175 			if (!status)
7176 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7177 			else
7178 				acrtc->otg_inst = status->primary_otg_inst;
7179 		}
7180 	}
7181 #ifdef CONFIG_DRM_AMD_DC_HDCP
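	/* Propagate content protection (HDCP) state changes to the HDCP workqueue. */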
7182 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7183 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7184 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7185 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7186 
7187 		new_crtc_state = NULL;
7188 
7189 		if (acrtc)
7190 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7191 
7192 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7193 
7194 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7195 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7196 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7197 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7198 			continue;
7199 		}
7200 
7201 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7202 			hdcp_update_display(
7203 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7204 				new_con_state->hdcp_content_type,
7205 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7206 													 : false);
7207 	}
7208 #endif
7209 
7210 	/* Handle connector state changes */
7211 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7212 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7213 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7214 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7215 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7216 		struct dc_stream_update stream_update;
7217 		struct dc_info_packet hdr_packet;
7218 		struct dc_stream_status *status = NULL;
7219 		bool abm_changed, hdr_changed, scaling_changed;
7220 
7221 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7222 		memset(&stream_update, 0, sizeof(stream_update));
7223 
7224 		if (acrtc) {
7225 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7226 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7227 		}
7228 
7229 		/* Skip any modesets/resets */
7230 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7231 			continue;
7232 
7233 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7234 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7235 
7236 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7237 							     dm_old_con_state);
7238 
7239 		abm_changed = dm_new_crtc_state->abm_level !=
7240 			      dm_old_crtc_state->abm_level;
7241 
7242 		hdr_changed =
7243 			is_hdr_metadata_different(old_con_state, new_con_state);
7244 
7245 		if (!scaling_changed && !abm_changed && !hdr_changed)
7246 			continue;
7247 
7248 		stream_update.stream = dm_new_crtc_state->stream;
7249 		if (scaling_changed) {
7250 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7251 					dm_new_con_state, dm_new_crtc_state->stream);
7252 
7253 			stream_update.src = dm_new_crtc_state->stream->src;
7254 			stream_update.dst = dm_new_crtc_state->stream->dst;
7255 		}
7256 
7257 		if (abm_changed) {
7258 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7259 
7260 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7261 		}
7262 
7263 		if (hdr_changed) {
7264 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7265 			stream_update.hdr_static_metadata = &hdr_packet;
7266 		}
7267 
7268 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7269 		WARN_ON(!status);
7270 		WARN_ON(!status->plane_count);
7271 
7272 		/*
7273 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7274 		 * Here we create an empty update on each plane.
7275 		 * To fix this, DC should permit updating only stream properties.
7276 		 */
7277 		for (j = 0; j < status->plane_count; j++)
7278 			dummy_updates[j].surface = status->plane_states[0];
7279 
7280 
7281 		mutex_lock(&dm->dc_lock);
7282 		dc_commit_updates_for_stream(dm->dc,
7283 						     dummy_updates,
7284 						     status->plane_count,
7285 						     dm_new_crtc_state->stream,
7286 						     &stream_update,
7287 						     dc_state);
7288 		mutex_unlock(&dm->dc_lock);
7289 	}
7290 
7291 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7292 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7293 				      new_crtc_state, i) {
7294 		if (old_crtc_state->active && !new_crtc_state->active)
7295 			crtc_disable_count++;
7296 
7297 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7298 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7299 
7300 		/* Update freesync active state. */
7301 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7302 
7303 		/* Handle vrr on->off / off->on transitions */
7304 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7305 						dm_new_crtc_state);
7306 	}
7307 
7308 	/* Enable interrupts for CRTCs going through a modeset. */
7309 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7310 
7311 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7312 		if (new_crtc_state->async_flip)
7313 			wait_for_vblank = false;
7314 
7315 	/* update planes when needed per crtc*/
7316 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7317 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7318 
7319 		if (dm_new_crtc_state->stream)
7320 			amdgpu_dm_commit_planes(state, dc_state, dev,
7321 						dm, crtc, wait_for_vblank);
7322 	}
7323 
7324 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7325 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7326 
7327 	/* Update audio instances for each connector. */
7328 	amdgpu_dm_commit_audio(dev, state);
7329 
7330 	/*
7331 	 * send vblank event on all events not handled in flip and
7332 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7333 	 */
7334 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7335 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7336 
7337 		if (new_crtc_state->event)
7338 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7339 
7340 		new_crtc_state->event = NULL;
7341 	}
7342 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7343 
7344 	/* Signal HW programming completion */
7345 	drm_atomic_helper_commit_hw_done(state);
7346 
7347 	if (wait_for_vblank)
7348 		drm_atomic_helper_wait_for_flip_done(dev, state);
7349 
7350 	drm_atomic_helper_cleanup_planes(dev, state);
7351 
7352 	/*
7353 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7354 	 * so we can put the GPU into runtime suspend if we're not driving any
7355 	 * displays anymore
7356 	 */
7357 	for (i = 0; i < crtc_disable_count; i++)
7358 		pm_runtime_put_autosuspend(dev->dev);
7359 	pm_runtime_mark_last_busy(dev->dev);
7360 
7361 	if (dc_state_temp)
7362 		dc_release_state(dc_state_temp);
7363 }
7364 
7365 
7366 static int dm_force_atomic_commit(struct drm_connector *connector)
7367 {
7368 	int ret = 0;
7369 	struct drm_device *ddev = connector->dev;
7370 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7371 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7372 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7373 	struct drm_connector_state *conn_state;
7374 	struct drm_crtc_state *crtc_state;
7375 	struct drm_plane_state *plane_state;
7376 
7377 	if (!state)
7378 		return -ENOMEM;
7379 
7380 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7381 
7382 	/* Construct an atomic state to restore previous display setting */
7383 
7384 	/*
7385 	 * Attach connectors to drm_atomic_state
7386 	 */
7387 	conn_state = drm_atomic_get_connector_state(state, connector);
7388 
7389 	ret = PTR_ERR_OR_ZERO(conn_state);
7390 	if (ret)
7391 		goto err;
7392 
7393 	/* Attach crtc to drm_atomic_state*/
7394 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7395 
7396 	ret = PTR_ERR_OR_ZERO(crtc_state);
7397 	if (ret)
7398 		goto err;
7399 
7400 	/* force a restore */
7401 	crtc_state->mode_changed = true;
7402 
7403 	/* Attach plane to drm_atomic_state */
7404 	plane_state = drm_atomic_get_plane_state(state, plane);
7405 
7406 	ret = PTR_ERR_OR_ZERO(plane_state);
7407 	if (ret)
7408 		goto err;
7409 
7410 
7411 	/* Call commit internally with the state we just constructed */
7412 	ret = drm_atomic_commit(state);
7413 	if (!ret)
7414 		return 0;
7415 
7416 err:
7417 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7418 	drm_atomic_state_put(state);
7419 
7420 	return ret;
7421 }
7422 
7423 /*
7424  * This function handles all cases where a mode set does not come upon
7425  * hotplug: when a display is unplugged and then plugged back into the
7426  * same port, and when running without usermode desktop manager support.
7427  */
7428 void dm_restore_drm_connector_state(struct drm_device *dev,
7429 				    struct drm_connector *connector)
7430 {
7431 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7432 	struct amdgpu_crtc *disconnected_acrtc;
7433 	struct dm_crtc_state *acrtc_state;
7434 
7435 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7436 		return;
7437 
7438 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7439 	if (!disconnected_acrtc)
7440 		return;
7441 
7442 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7443 	if (!acrtc_state->stream)
7444 		return;
7445 
7446 	/*
7447 	 * If the previous sink is not released and is different from the
7448 	 * current one, we deduce we cannot rely on a usermode call to turn
7449 	 * on the display, so we do it here.
7450 	 */
7451 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7452 		dm_force_atomic_commit(&aconnector->base);
7453 }
7454 
7455 /*
7456  * Grabs all modesetting locks to serialize against any blocking commits,
7457  * and waits for completion of all non-blocking commits.
7458  */
7459 static int do_aquire_global_lock(struct drm_device *dev,
7460 				 struct drm_atomic_state *state)
7461 {
7462 	struct drm_crtc *crtc;
7463 	struct drm_crtc_commit *commit;
7464 	long ret;
7465 
7466 	/*
7467 	 * Adding all modeset locks to acquire_ctx will
7468 	 * ensure that when the framework releases it, the
7469 	 * extra locks we are taking here will get released too.
7470 	 */
7471 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7472 	if (ret)
7473 		return ret;
7474 
7475 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7476 		spin_lock(&crtc->commit_lock);
7477 		commit = list_first_entry_or_null(&crtc->commit_list,
7478 				struct drm_crtc_commit, commit_entry);
7479 		if (commit)
7480 			drm_crtc_commit_get(commit);
7481 		spin_unlock(&crtc->commit_lock);
7482 
7483 		if (!commit)
7484 			continue;
7485 
7486 		/*
7487 		 * Make sure all pending HW programming has completed and all
7488 		 * page flips are done.
7489 		 */
7490 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7491 
7492 		if (ret > 0)
7493 			ret = wait_for_completion_interruptible_timeout(
7494 					&commit->flip_done, 10*HZ);
7495 
7496 		if (ret == 0)
7497 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7498 				  "timed out\n", crtc->base.id, crtc->name);
7499 
7500 		drm_crtc_commit_put(commit);
7501 	}
7502 
7503 	return ret < 0 ? ret : 0;
7504 }
7505 
7506 static void get_freesync_config_for_crtc(
7507 	struct dm_crtc_state *new_crtc_state,
7508 	struct dm_connector_state *new_con_state)
7509 {
7510 	struct mod_freesync_config config = {0};
7511 	struct amdgpu_dm_connector *aconnector =
7512 			to_amdgpu_dm_connector(new_con_state->base.connector);
7513 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7514 	int vrefresh = drm_mode_vrefresh(mode);
7515 
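	/*
	 * VRR can only be claimed when the mode's nominal refresh rate falls
	 * inside the connector's reported FreeSync range.
	 */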
7516 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7517 					vrefresh >= aconnector->min_vfreq &&
7518 					vrefresh <= aconnector->max_vfreq;
7519 
7520 	if (new_crtc_state->vrr_supported) {
7521 		new_crtc_state->stream->ignore_msa_timing_param = true;
7522 		config.state = new_crtc_state->base.vrr_enabled ?
7523 				VRR_STATE_ACTIVE_VARIABLE :
7524 				VRR_STATE_INACTIVE;
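		/* The freesync module expects the range in micro-Hz (uhz). */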
7525 		config.min_refresh_in_uhz =
7526 				aconnector->min_vfreq * 1000000;
7527 		config.max_refresh_in_uhz =
7528 				aconnector->max_vfreq * 1000000;
7529 		config.vsif_supported = true;
7530 		config.btr = true;
7531 	}
7532 
7533 	new_crtc_state->freesync_config = config;
7534 }
7535 
7536 static void reset_freesync_config_for_crtc(
7537 	struct dm_crtc_state *new_crtc_state)
7538 {
7539 	new_crtc_state->vrr_supported = false;
7540 
7541 	memset(&new_crtc_state->vrr_params, 0,
7542 	       sizeof(new_crtc_state->vrr_params));
7543 	memset(&new_crtc_state->vrr_infopacket, 0,
7544 	       sizeof(new_crtc_state->vrr_infopacket));
7545 }
7546 
7547 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7548 				struct drm_atomic_state *state,
7549 				struct drm_crtc *crtc,
7550 				struct drm_crtc_state *old_crtc_state,
7551 				struct drm_crtc_state *new_crtc_state,
7552 				bool enable,
7553 				bool *lock_and_validation_needed)
7554 {
7555 	struct dm_atomic_state *dm_state = NULL;
7556 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7557 	struct dc_stream_state *new_stream;
7558 	int ret = 0;
7559 
7560 	/*
7561 	 * TODO: Move this code, which updates the changed items, into
7562 	 * dm_crtc_atomic_check once we get rid of dc_validation_set.
7563 	 */
7564 	struct amdgpu_crtc *acrtc = NULL;
7565 	struct amdgpu_dm_connector *aconnector = NULL;
7566 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7567 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7568 
7569 	new_stream = NULL;
7570 
7571 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7572 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7573 	acrtc = to_amdgpu_crtc(crtc);
7574 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7575 
7576 	/* TODO This hack should go away */
7577 	if (aconnector && enable) {
7578 		/* Make sure fake sink is created in plug-in scenario */
7579 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7580 							    &aconnector->base);
7581 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7582 							    &aconnector->base);
7583 
7584 		if (IS_ERR(drm_new_conn_state)) {
7585 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7586 			goto fail;
7587 		}
7588 
7589 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7590 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7591 
7592 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7593 			goto skip_modeset;
7594 
7595 		new_stream = create_stream_for_sink(aconnector,
7596 						     &new_crtc_state->mode,
7597 						    dm_new_conn_state,
7598 						    dm_old_crtc_state->stream);
7599 
7600 		/*
7601 		 * We can have no stream on ACTION_SET if a display
7602 		 * was disconnected during S3; in this case it is not an
7603 		 * error, the OS will be updated after detection and
7604 		 * will do the right thing on the next atomic commit.
7605 		 */
7606 
7607 		if (!new_stream) {
7608 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7609 					__func__, acrtc->base.base.id);
7610 			ret = -ENOMEM;
7611 			goto fail;
7612 		}
7613 
7614 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7615 
7616 		ret = fill_hdr_info_packet(drm_new_conn_state,
7617 					   &new_stream->hdr_static_metadata);
7618 		if (ret)
7619 			goto fail;
7620 
7621 		/*
7622 		 * If we already removed the old stream from the context
7623 		 * (and set the new stream to NULL) then we can't reuse
7624 		 * the old stream even if the stream and scaling are unchanged.
7625 		 * We'll hit the BUG_ON and black screen.
7626 		 *
7627 		 * TODO: Refactor this function to allow this check to work
7628 		 * in all conditions.
7629 		 */
7630 		if (dm_new_crtc_state->stream &&
7631 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7632 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7633 			new_crtc_state->mode_changed = false;
7634 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7635 					 new_crtc_state->mode_changed);
7636 		}
7637 	}
7638 
7639 	/* mode_changed flag may get updated above, need to check again */
7640 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7641 		goto skip_modeset;
7642 
7643 	DRM_DEBUG_DRIVER(
7644 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7645 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7646 		"connectors_changed:%d\n",
7647 		acrtc->crtc_id,
7648 		new_crtc_state->enable,
7649 		new_crtc_state->active,
7650 		new_crtc_state->planes_changed,
7651 		new_crtc_state->mode_changed,
7652 		new_crtc_state->active_changed,
7653 		new_crtc_state->connectors_changed);
7654 
7655 	/* Remove stream for any changed/disabled CRTC */
7656 	if (!enable) {
7657 
7658 		if (!dm_old_crtc_state->stream)
7659 			goto skip_modeset;
7660 
7661 		ret = dm_atomic_get_state(state, &dm_state);
7662 		if (ret)
7663 			goto fail;
7664 
7665 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7666 				crtc->base.id);
7667 
7668 		/* i.e. reset mode */
7669 		if (dc_remove_stream_from_ctx(
7670 				dm->dc,
7671 				dm_state->context,
7672 				dm_old_crtc_state->stream) != DC_OK) {
7673 			ret = -EINVAL;
7674 			goto fail;
7675 		}
7676 
7677 		dc_stream_release(dm_old_crtc_state->stream);
7678 		dm_new_crtc_state->stream = NULL;
7679 
7680 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7681 
7682 		*lock_and_validation_needed = true;
7683 
7684 	} else {/* Add stream for any updated/enabled CRTC */
7685 		/*
7686 		 * Quick fix to prevent a NULL pointer dereference on new_stream
7687 		 * when newly added MST connectors are not found in the existing
7688 		 * crtc_state in chained mode. TODO: dig out the root cause.
7689 		 */
7690 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7691 			goto skip_modeset;
7692 
7693 		if (modereset_required(new_crtc_state))
7694 			goto skip_modeset;
7695 
7696 		if (modeset_required(new_crtc_state, new_stream,
7697 				     dm_old_crtc_state->stream)) {
7698 
7699 			WARN_ON(dm_new_crtc_state->stream);
7700 
7701 			ret = dm_atomic_get_state(state, &dm_state);
7702 			if (ret)
7703 				goto fail;
7704 
7705 			dm_new_crtc_state->stream = new_stream;
7706 
7707 			dc_stream_retain(new_stream);
7708 
7709 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7710 						crtc->base.id);
7711 
7712 			if (dc_add_stream_to_ctx(
7713 					dm->dc,
7714 					dm_state->context,
7715 					dm_new_crtc_state->stream) != DC_OK) {
7716 				ret = -EINVAL;
7717 				goto fail;
7718 			}
7719 
7720 			*lock_and_validation_needed = true;
7721 		}
7722 	}
7723 
7724 skip_modeset:
7725 	/* Release extra reference */
7726 	if (new_stream)
7727 		 dc_stream_release(new_stream);
7728 
7729 	/*
7730 	 * We want to do dc stream updates that do not require a
7731 	 * full modeset below.
7732 	 */
7733 	if (!(enable && aconnector && new_crtc_state->enable &&
7734 	      new_crtc_state->active))
7735 		return 0;
7736 	/*
7737 	 * Given above conditions, the dc state cannot be NULL because:
7738 	 * 1. We're in the process of enabling CRTCs (the stream has just been
7739 	 *    added to the dc context, or is already in the context),
7740 	 * 2. Has a valid connector attached, and
7741 	 * 3. Is currently active and enabled.
7742 	 * => The dc stream state currently exists.
7743 	 */
7744 	BUG_ON(dm_new_crtc_state->stream == NULL);
7745 
7746 	/* Scaling or underscan settings */
7747 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7748 		update_stream_scaling_settings(
7749 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7750 
7751 	/* ABM settings */
7752 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7753 
7754 	/*
7755 	 * Color management settings. We also update color properties
7756 	 * when a modeset is needed, to ensure it gets reprogrammed.
7757 	 */
7758 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7759 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7760 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7761 		if (ret)
7762 			goto fail;
7763 	}
7764 
7765 	/* Update Freesync settings. */
7766 	get_freesync_config_for_crtc(dm_new_crtc_state,
7767 				     dm_new_conn_state);
7768 
7769 	return ret;
7770 
7771 fail:
7772 	if (new_stream)
7773 		dc_stream_release(new_stream);
7774 	return ret;
7775 }
7776 
7777 static bool should_reset_plane(struct drm_atomic_state *state,
7778 			       struct drm_plane *plane,
7779 			       struct drm_plane_state *old_plane_state,
7780 			       struct drm_plane_state *new_plane_state)
7781 {
7782 	struct drm_plane *other;
7783 	struct drm_plane_state *old_other_state, *new_other_state;
7784 	struct drm_crtc_state *new_crtc_state;
7785 	int i;
7786 
7787 	/*
7788 	 * TODO: Remove this hack once the checks below are sufficient to
7789 	 * determine when we need to reset all the planes on the stream.
7790 	 * the stream.
7791 	 */
7792 	if (state->allow_modeset)
7793 		return true;
7794 
7795 	/* Exit early if we know that we're adding or removing the plane. */
7796 	if (old_plane_state->crtc != new_plane_state->crtc)
7797 		return true;
7798 
7799 	/* old crtc == new_crtc == NULL, plane not in context. */
7800 	if (!new_plane_state->crtc)
7801 		return false;
7802 
7803 	new_crtc_state =
7804 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7805 
7806 	if (!new_crtc_state)
7807 		return true;
7808 
7809 	/* CRTC Degamma changes currently require us to recreate planes. */
7810 	if (new_crtc_state->color_mgmt_changed)
7811 		return true;
7812 
7813 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7814 		return true;
7815 
7816 	/*
7817 	 * If there are any new primary or overlay planes being added or
7818 	 * removed then the z-order can potentially change. To ensure
7819 	 * correct z-order and pipe acquisition the current DC architecture
7820 	 * requires us to remove and recreate all existing planes.
7821 	 *
7822 	 * TODO: Come up with a more elegant solution for this.
7823 	 */
7824 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7825 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7826 			continue;
7827 
7828 		if (old_other_state->crtc != new_plane_state->crtc &&
7829 		    new_other_state->crtc != new_plane_state->crtc)
7830 			continue;
7831 
7832 		if (old_other_state->crtc != new_other_state->crtc)
7833 			return true;
7834 
7835 		/* TODO: Remove this once we can handle fast format changes. */
7836 		if (old_other_state->fb && new_other_state->fb &&
7837 		    old_other_state->fb->format != new_other_state->fb->format)
7838 			return true;
7839 	}
7840 
7841 	return false;
7842 }
7843 
7844 static int dm_update_plane_state(struct dc *dc,
7845 				 struct drm_atomic_state *state,
7846 				 struct drm_plane *plane,
7847 				 struct drm_plane_state *old_plane_state,
7848 				 struct drm_plane_state *new_plane_state,
7849 				 bool enable,
7850 				 bool *lock_and_validation_needed)
7851 {
7852 
7853 	struct dm_atomic_state *dm_state = NULL;
7854 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7855 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7856 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7857 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7858 	struct amdgpu_crtc *new_acrtc;
7859 	bool needs_reset;
7860 	int ret = 0;
7861 
7862 
7863 	new_plane_crtc = new_plane_state->crtc;
7864 	old_plane_crtc = old_plane_state->crtc;
7865 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7866 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7867 
7868 	/* TODO: Implement a better atomic check for the cursor plane */
7869 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7870 		if (!enable || !new_plane_crtc ||
7871 			drm_atomic_plane_disabling(plane->state, new_plane_state))
7872 			return 0;
7873 
7874 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7875 
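		/* Reject cursor sizes the hardware cursor cannot handle. */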
7876 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7877 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7878 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7879 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
7880 			return -EINVAL;
7881 		}
7882 
7883 		return 0;
7884 	}
7885 
7886 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7887 					 new_plane_state);
7888 
7889 	/* Remove any changed/removed planes */
7890 	if (!enable) {
7891 		if (!needs_reset)
7892 			return 0;
7893 
7894 		if (!old_plane_crtc)
7895 			return 0;
7896 
7897 		old_crtc_state = drm_atomic_get_old_crtc_state(
7898 				state, old_plane_crtc);
7899 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7900 
7901 		if (!dm_old_crtc_state->stream)
7902 			return 0;
7903 
7904 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7905 				plane->base.id, old_plane_crtc->base.id);
7906 
7907 		ret = dm_atomic_get_state(state, &dm_state);
7908 		if (ret)
7909 			return ret;
7910 
7911 		if (!dc_remove_plane_from_context(
7912 				dc,
7913 				dm_old_crtc_state->stream,
7914 				dm_old_plane_state->dc_state,
7915 				dm_state->context)) {
7916 
7917 			ret = -EINVAL;
7918 			return ret;
7919 		}
7920 
7921 
7922 		dc_plane_state_release(dm_old_plane_state->dc_state);
7923 		dm_new_plane_state->dc_state = NULL;
7924 
7925 		*lock_and_validation_needed = true;
7926 
7927 	} else { /* Add new planes */
7928 		struct dc_plane_state *dc_new_plane_state;
7929 
7930 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7931 			return 0;
7932 
7933 		if (!new_plane_crtc)
7934 			return 0;
7935 
7936 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7937 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7938 
7939 		if (!dm_new_crtc_state->stream)
7940 			return 0;
7941 
7942 		if (!needs_reset)
7943 			return 0;
7944 
7945 		WARN_ON(dm_new_plane_state->dc_state);
7946 
7947 		dc_new_plane_state = dc_create_plane_state(dc);
7948 		if (!dc_new_plane_state)
7949 			return -ENOMEM;
7950 
7951 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7952 				plane->base.id, new_plane_crtc->base.id);
7953 
7954 		ret = fill_dc_plane_attributes(
7955 			new_plane_crtc->dev->dev_private,
7956 			dc_new_plane_state,
7957 			new_plane_state,
7958 			new_crtc_state);
7959 		if (ret) {
7960 			dc_plane_state_release(dc_new_plane_state);
7961 			return ret;
7962 		}
7963 
7964 		ret = dm_atomic_get_state(state, &dm_state);
7965 		if (ret) {
7966 			dc_plane_state_release(dc_new_plane_state);
7967 			return ret;
7968 		}
7969 
7970 		/*
7971 		 * Any atomic check errors that occur after this will
7972 		 * not need a release. The plane state will be attached
7973 		 * to the stream, and therefore part of the atomic
7974 		 * state. It'll be released when the atomic state is
7975 		 * cleaned.
7976 		 */
7977 		if (!dc_add_plane_to_context(
7978 				dc,
7979 				dm_new_crtc_state->stream,
7980 				dc_new_plane_state,
7981 				dm_state->context)) {
7982 
7983 			dc_plane_state_release(dc_new_plane_state);
7984 			return -EINVAL;
7985 		}
7986 
7987 		dm_new_plane_state->dc_state = dc_new_plane_state;
7988 
7989 		/* Tell DC to do a full surface update every time there
7990 		 * is a plane change. Inefficient, but works for now.
7991 		 */
7992 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7993 
7994 		*lock_and_validation_needed = true;
7995 	}
7996 
7997 
7998 	return ret;
7999 }
8000 
8001 static int
8002 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8003 				    struct drm_atomic_state *state,
8004 				    enum surface_update_type *out_type)
8005 {
8006 	struct dc *dc = dm->dc;
8007 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8008 	int i, j, num_plane, ret = 0;
8009 	struct drm_plane_state *old_plane_state, *new_plane_state;
8010 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8011 	struct drm_crtc *new_plane_crtc;
8012 	struct drm_plane *plane;
8013 
8014 	struct drm_crtc *crtc;
8015 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8016 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8017 	struct dc_stream_status *status = NULL;
8018 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8019 	struct surface_info_bundle {
8020 		struct dc_surface_update surface_updates[MAX_SURFACES];
8021 		struct dc_plane_info plane_infos[MAX_SURFACES];
8022 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8023 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8024 		struct dc_stream_update stream_update;
8025 	} *bundle;
8026 
8027 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8028 
8029 	if (!bundle) {
8030 		DRM_ERROR("Failed to allocate update bundle\n");
8031 		/* Set type to FULL to avoid crashing in DC */
8032 		update_type = UPDATE_TYPE_FULL;
8033 		goto cleanup;
8034 	}
8035 
8036 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8037 
8038 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8039 
8040 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8041 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8042 		num_plane = 0;
8043 
8044 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8045 			update_type = UPDATE_TYPE_FULL;
8046 			goto cleanup;
8047 		}
8048 
8049 		if (!new_dm_crtc_state->stream)
8050 			continue;
8051 
8052 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8053 			const struct amdgpu_framebuffer *amdgpu_fb =
8054 				to_amdgpu_framebuffer(new_plane_state->fb);
8055 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8056 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8057 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8058 			uint64_t tiling_flags;
8059 
8060 			new_plane_crtc = new_plane_state->crtc;
8061 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8062 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8063 
8064 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8065 				continue;
8066 
8067 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8068 				update_type = UPDATE_TYPE_FULL;
8069 				goto cleanup;
8070 			}
8071 
8072 			if (crtc != new_plane_crtc)
8073 				continue;
8074 
8075 			bundle->surface_updates[num_plane].surface =
8076 					new_dm_plane_state->dc_state;
8077 
8078 			if (new_crtc_state->mode_changed) {
8079 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8080 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8081 			}
8082 
8083 			if (new_crtc_state->color_mgmt_changed) {
8084 				bundle->surface_updates[num_plane].gamma =
8085 						new_dm_plane_state->dc_state->gamma_correction;
8086 				bundle->surface_updates[num_plane].in_transfer_func =
8087 						new_dm_plane_state->dc_state->in_transfer_func;
8088 				bundle->stream_update.gamut_remap =
8089 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8090 				bundle->stream_update.output_csc_transform =
8091 						&new_dm_crtc_state->stream->csc_color_matrix;
8092 				bundle->stream_update.out_transfer_func =
8093 						new_dm_crtc_state->stream->out_transfer_func;
8094 			}
8095 
8096 			ret = fill_dc_scaling_info(new_plane_state,
8097 						   scaling_info);
8098 			if (ret)
8099 				goto cleanup;
8100 
8101 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8102 
8103 			if (amdgpu_fb) {
8104 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8105 				if (ret)
8106 					goto cleanup;
8107 
8108 				ret = fill_dc_plane_info_and_addr(
8109 					dm->adev, new_plane_state, tiling_flags,
8110 					plane_info,
8111 					&flip_addr->address,
8112 					false);
8113 				if (ret)
8114 					goto cleanup;
8115 
8116 				bundle->surface_updates[num_plane].plane_info = plane_info;
8117 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8118 			}
8119 
8120 			num_plane++;
8121 		}
8122 
8123 		if (num_plane == 0)
8124 			continue;
8125 
8126 		ret = dm_atomic_get_state(state, &dm_state);
8127 		if (ret)
8128 			goto cleanup;
8129 
8130 		old_dm_state = dm_atomic_get_old_state(state);
8131 		if (!old_dm_state) {
8132 			ret = -EINVAL;
8133 			goto cleanup;
8134 		}
8135 
8136 		status = dc_stream_get_status_from_state(old_dm_state->context,
8137 							 new_dm_crtc_state->stream);
8138 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8139 		/*
8140 		 * TODO: DC modifies the surface during this call so we need
8141 		 * to lock here - find a way to do this without locking.
8142 		 */
8143 		mutex_lock(&dm->dc_lock);
8144 		update_type = dc_check_update_surfaces_for_stream(
8145 				dc,	bundle->surface_updates, num_plane,
8146 				&bundle->stream_update, status);
8147 		mutex_unlock(&dm->dc_lock);
8148 
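		/* Anything heavier than a medium update is treated as full. */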
8149 		if (update_type > UPDATE_TYPE_MED) {
8150 			update_type = UPDATE_TYPE_FULL;
8151 			goto cleanup;
8152 		}
8153 	}
8154 
8155 cleanup:
8156 	kfree(bundle);
8157 
8158 	*out_type = update_type;
8159 	return ret;
8160 }
8161 
8162 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8163 {
8164 	struct drm_connector *connector;
8165 	struct drm_connector_state *conn_state;
8166 	struct amdgpu_dm_connector *aconnector = NULL;
8167 	int i;
8168 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8169 		if (conn_state->crtc != crtc)
8170 			continue;
8171 
8172 		aconnector = to_amdgpu_dm_connector(connector);
8173 		if (!aconnector->port || !aconnector->mst_port)
8174 			aconnector = NULL;
8175 		else
8176 			break;
8177 	}
8178 
8179 	if (!aconnector)
8180 		return 0;
8181 
8182 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8183 }
8184 
8185 /**
8186  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8187  * @dev: The DRM device
8188  * @state: The atomic state to commit
8189  *
8190  * Validate that the given atomic state is programmable by DC into hardware.
8191  * This involves constructing a &struct dc_state reflecting the new hardware
8192  * state we wish to commit, then querying DC to see if it is programmable. It's
8193  * important not to modify the existing DC state. Otherwise, atomic_check
8194  * may unexpectedly commit hardware changes.
8195  *
8196  * When validating the DC state, it's important that the right locks are
8197  * acquired. For the full-update case, which removes/adds/updates streams on
8198  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
8199  * that any such full-update commit will wait for completion of any outstanding
8200  * flip using DRM's synchronization events. See
8201  * dm_determine_update_type_for_commit()
8202  *
8203  * Note that DM adds the affected connectors for all CRTCs in state, even when that
8204  * might not seem necessary. This is because DC stream creation requires the
8205  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8206  * be possible but non-trivial - a possible TODO item.
8207  *
8208  * Return: 0 on success, or a negative error code if validation failed.
8209  */
8210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8211 				  struct drm_atomic_state *state)
8212 {
8213 	struct amdgpu_device *adev = dev->dev_private;
8214 	struct dm_atomic_state *dm_state = NULL;
8215 	struct dc *dc = adev->dm.dc;
8216 	struct drm_connector *connector;
8217 	struct drm_connector_state *old_con_state, *new_con_state;
8218 	struct drm_crtc *crtc;
8219 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8220 	struct drm_plane *plane;
8221 	struct drm_plane_state *old_plane_state, *new_plane_state;
8222 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8223 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8224 
8225 	int ret, i;
8226 
8227 	/*
8228 	 * This bool will be set to true for any modeset/reset
8229 	 * or plane update that implies a non-fast surface update.
8230 	 */
8231 	bool lock_and_validation_needed = false;
8232 
8233 	ret = drm_atomic_helper_check_modeset(dev, state);
8234 	if (ret)
8235 		goto fail;
8236 
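	/*
	 * DSC over MST is only available on Navi and newer ASICs, so only
	 * they need the affected DSC CRTCs added to the state.
	 */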
8237 	if (adev->asic_type >= CHIP_NAVI10) {
8238 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8239 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8240 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8241 				if (ret)
8242 					goto fail;
8243 			}
8244 		}
8245 	}
8246 
8247 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8248 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8249 		    !new_crtc_state->color_mgmt_changed &&
8250 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8251 			continue;
8252 
8253 		if (!new_crtc_state->enable)
8254 			continue;
8255 
8256 		ret = drm_atomic_add_affected_connectors(state, crtc);
8257 		if (ret)
8258 			goto fail;
8259 
8260 		ret = drm_atomic_add_affected_planes(state, crtc);
8261 		if (ret)
8262 			goto fail;
8263 	}
8264 
8265 	/*
8266 	 * Add all primary and overlay planes on the CRTC to the state
8267 	 * whenever a plane is enabled to maintain correct z-ordering
8268 	 * and to enable fast surface updates.
8269 	 */
8270 	drm_for_each_crtc(crtc, dev) {
8271 		bool modified = false;
8272 
8273 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8274 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8275 				continue;
8276 
8277 			if (new_plane_state->crtc == crtc ||
8278 			    old_plane_state->crtc == crtc) {
8279 				modified = true;
8280 				break;
8281 			}
8282 		}
8283 
8284 		if (!modified)
8285 			continue;
8286 
8287 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8288 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8289 				continue;
8290 
8291 			new_plane_state =
8292 				drm_atomic_get_plane_state(state, plane);
8293 
8294 			if (IS_ERR(new_plane_state)) {
8295 				ret = PTR_ERR(new_plane_state);
8296 				goto fail;
8297 			}
8298 		}
8299 	}
8300 
8301 	/* Remove existing planes if they are modified */
8302 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8303 		ret = dm_update_plane_state(dc, state, plane,
8304 					    old_plane_state,
8305 					    new_plane_state,
8306 					    false,
8307 					    &lock_and_validation_needed);
8308 		if (ret)
8309 			goto fail;
8310 	}
8311 
8312 	/* Disable all crtcs which require disable */
8313 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8314 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8315 					   old_crtc_state,
8316 					   new_crtc_state,
8317 					   false,
8318 					   &lock_and_validation_needed);
8319 		if (ret)
8320 			goto fail;
8321 	}
8322 
8323 	/* Enable all crtcs which require enable */
8324 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8325 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8326 					   old_crtc_state,
8327 					   new_crtc_state,
8328 					   true,
8329 					   &lock_and_validation_needed);
8330 		if (ret)
8331 			goto fail;
8332 	}
8333 
8334 	/* Add new/modified planes */
8335 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8336 		ret = dm_update_plane_state(dc, state, plane,
8337 					    old_plane_state,
8338 					    new_plane_state,
8339 					    true,
8340 					    &lock_and_validation_needed);
8341 		if (ret)
8342 			goto fail;
8343 	}
8344 
8345 	/* Run this here since we want to validate the streams we created */
8346 	ret = drm_atomic_helper_check_planes(dev, state);
8347 	if (ret)
8348 		goto fail;
8349 
8350 	if (state->legacy_cursor_update) {
8351 		/*
8352 		 * This is a fast cursor update coming from the plane update
8353 		 * helper, check if it can be done asynchronously for better
8354 		 * performance.
8355 		 */
8356 		state->async_update =
8357 			!drm_atomic_helper_async_check(dev, state);
8358 
8359 		/*
8360 		 * Skip the remaining global validation if this is an async
8361 		 * update. Cursor updates can be done without affecting
8362 		 * state or bandwidth calcs and this avoids the performance
8363 		 * penalty of locking the private state object and
8364 		 * allocating a new dc_state.
8365 		 */
8366 		if (state->async_update)
8367 			return 0;
8368 	}
8369 
8370 	/* Check scaling and underscan changes. */
8371 	/* TODO: Scaling-change validation was removed due to the inability to
8372 	 * commit a new stream into the context w/o causing a full reset. Need
8373 	 * to decide how to handle this.
8374 	 */
8375 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8376 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8377 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8378 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8379 
8380 		/* Skip any modesets/resets */
8381 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8382 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8383 			continue;
8384 
8385 		/* Skip anything that is not a scaling or underscan change */
8386 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8387 			continue;
8388 
8389 		overall_update_type = UPDATE_TYPE_FULL;
8390 		lock_and_validation_needed = true;
8391 	}
8392 
8393 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8394 	if (ret)
8395 		goto fail;
8396 
8397 	if (overall_update_type < update_type)
8398 		overall_update_type = update_type;
8399 
8400 	/*
8401 	 * lock_and_validation_needed was the old way to determine whether we
8402 	 * need the global lock. Leaving it in to check if we broke any corner cases:
8403 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8404 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8405 	 */
8406 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8407 		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8408 
8409 	if (overall_update_type > UPDATE_TYPE_FAST) {
8410 		ret = dm_atomic_get_state(state, &dm_state);
8411 		if (ret)
8412 			goto fail;
8413 
8414 		ret = do_aquire_global_lock(dev, state);
8415 		if (ret)
8416 			goto fail;
8417 
8418 #if defined(CONFIG_DRM_AMD_DC_DCN)
8419 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8420 			goto fail;
8421 
8422 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8423 		if (ret)
8424 			goto fail;
8425 #endif
8426 
8427 		/*
8428 		 * Perform validation of MST topology in the state:
8429 		 * We need to perform MST atomic check before calling
8430 		 * dc_validate_global_state(), or there is a chance
8431 		 * of getting stuck in an infinite loop and hanging eventually.
8432 		 */
8433 		ret = drm_dp_mst_atomic_check(state);
8434 		if (ret)
8435 			goto fail;
8436 
8437 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8438 			ret = -EINVAL;
8439 			goto fail;
8440 		}
8441 	} else {
8442 		/*
8443 		 * The commit is a fast update. Fast updates shouldn't change
8444 		 * the DC context, affect global validation, and can have their
8445 		 * commit work done in parallel with other commits not touching
8446 		 * the same resource. If we have a new DC context as part of
8447 		 * the DM atomic state from validation we need to free it and
8448 		 * retain the existing one instead.
8449 		 */
8450 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8451 
8452 		new_dm_state = dm_atomic_get_new_state(state);
8453 		old_dm_state = dm_atomic_get_old_state(state);
8454 
8455 		if (new_dm_state && old_dm_state) {
8456 			if (new_dm_state->context)
8457 				dc_release_state(new_dm_state->context);
8458 
8459 			new_dm_state->context = old_dm_state->context;
8460 
8461 			if (old_dm_state->context)
8462 				dc_retain_state(old_dm_state->context);
8463 		}
8464 	}
8465 
8466 	/* Store the overall update type for later use during commit. */
8467 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8468 		struct dm_crtc_state *dm_new_crtc_state =
8469 			to_dm_crtc_state(new_crtc_state);
8470 
8471 		dm_new_crtc_state->update_type = (int)overall_update_type;
8472 	}
8473 
8474 	/* Must be success */
8475 	WARN_ON(ret);
8476 	return ret;
8477 
8478 fail:
8479 	if (ret == -EDEADLK)
8480 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8481 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8482 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8483 	else
8484 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8485 
8486 	return ret;
8487 }
8488 
8489 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8490 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8491 {
8492 	uint8_t dpcd_data;
8493 	bool capable = false;
8494 
8495 	if (amdgpu_dm_connector->dc_link &&
8496 		dm_helpers_dp_read_dpcd(
8497 				NULL,
8498 				amdgpu_dm_connector->dc_link,
8499 				DP_DOWN_STREAM_PORT_COUNT,
8500 				&dpcd_data,
8501 				sizeof(dpcd_data))) {
8502 		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
8503 	}
8504 
8505 	return capable;
8506 }

8507 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8508 					struct edid *edid)
8509 {
8510 	int i;
8511 	bool edid_check_required;
8512 	struct detailed_timing *timing;
8513 	struct detailed_non_pixel *data;
8514 	struct detailed_data_monitor_range *range;
8515 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8516 			to_amdgpu_dm_connector(connector);
8517 	struct dm_connector_state *dm_con_state = NULL;
8518 
8519 	struct drm_device *dev = connector->dev;
8520 	struct amdgpu_device *adev = dev->dev_private;
8521 	bool freesync_capable = false;
8522 
8523 	if (!connector->state) {
8524 		DRM_ERROR("%s - Connector has no state\n", __func__);
8525 		goto update;
8526 	}
8527 
8528 	if (!edid) {
8529 		dm_con_state = to_dm_connector_state(connector->state);
8530 
8531 		amdgpu_dm_connector->min_vfreq = 0;
8532 		amdgpu_dm_connector->max_vfreq = 0;
8533 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8534 
8535 		goto update;
8536 	}
8537 
8538 	dm_con_state = to_dm_connector_state(connector->state);
8539 
8540 	edid_check_required = false;
8541 	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not set up the FreeSync module.\n");
8543 		goto update;
8544 	}
8545 	if (!adev->dm.freesync_module)
8546 		goto update;
	/*
	 * The EDID is known to be non-NULL at this point (the !edid case
	 * bailed out above), so simply restrict FreeSync support to DP and
	 * eDP sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
8562 			timing	= &edid->detailed_timings[i];
8563 			data	= &timing->data.other_data;
8564 			range	= &data->data.range;
			/*
			 * Only the monitor range-limits descriptor, which
			 * indicates continuous-frequency support, is of
			 * interest here.
			 */
8568 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8569 				continue;
			/*
			 * Only handle the "range limits only" flag (flags ==
			 * 1), which means no additional timing formula data is
			 * provided. Default GTF, GTF secondary curve and CVT
			 * are not supported.
			 */
8576 			if (range->flags != 1)
8577 				continue;
8578 
8579 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8580 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8581 			amdgpu_dm_connector->pixel_clock_mhz =
8582 				range->pixel_clock_mhz * 10;
8583 			break;
8584 		}
8585 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
8591 	}
8592 
8593 update:
8594 	if (dm_con_state)
8595 		dm_con_state->freesync_capable = freesync_capable;
8596 
8597 	if (connector->vrr_capable_property)
8598 		drm_connector_set_vrr_capable_property(connector,
8599 						       freesync_capable);
8600 }
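
/*
 * Worked example for the range-limits parsing above (illustrative numbers):
 * a descriptor advertising 40-75 Hz with a pixel-clock field of 15 yields
 * min_vfreq = 40, max_vfreq = 75 and pixel_clock_mhz = 15 * 10 = 150 MHz;
 * since 75 - 40 > 10, the connector is reported as FreeSync capable.
 */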
8601 
8602 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8603 {
8604 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8605 
8606 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8607 		return;
8608 	if (link->type == dc_connection_none)
8609 		return;
8610 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8611 					dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] != 0;
8613 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8614 	}
8615 }
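
/*
 * For reference: DP_PSR_SUPPORT is the sink's PSR capability field in the
 * DPCD. A non-zero value encodes the PSR version the panel supports, which
 * is why the simple non-zero test above is sufficient.
 */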
8616 
/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
8623 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8624 {
8625 	struct dc_link *link = NULL;
8626 	struct psr_config psr_config = {0};
8627 	struct psr_context psr_context = {0};
8628 	struct dc *dc = NULL;
8629 	bool ret = false;
8630 
	if (!stream)
		return false;
8633 
8634 	link = stream->link;
8635 	dc = link->ctx->dc;
8636 
8637 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8638 
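	/* A psr_version of 0 means the DMCU firmware provides no PSR support. */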
8639 	if (psr_config.psr_version > 0) {
8640 		psr_config.psr_exit_link_training_required = 0x1;
8641 		psr_config.psr_frame_capture_indication_req = 0;
8642 		psr_config.psr_rfb_setup_time = 0x37;
8643 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8644 		psr_config.allow_smu_optimizations = 0x0;
8645 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
8650 
8651 	return ret;
8652 }
8653 
/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
8660 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8661 {
8662 	struct dc_link *link = stream->link;
8663 	unsigned int vsync_rate_hz = 0;
8664 	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating an interrupt to enter
	 * PSR; initialize to a fail-safe default of 2 static frames.
	 */
	unsigned int num_frames_static = 2;
8670 
8671 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8672 
	vsync_rate_hz = div64_u64(div64_u64(
			stream->timing.pix_clk_100hz * 100,
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Round up: pick the number of frames such that at least 30 ms of
	 * static screen time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
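
	/*
	 * Worked example (illustrative numbers): a 148.5 MHz 1920x1080 mode
	 * with h_total = 2200 and v_total = 1125 has pix_clk_100hz = 1485000,
	 * so vsync_rate_hz = 148500000 / 1125 / 2200 = 60. That gives
	 * frame_time_microsec = 16666 and num_frames_static =
	 * 30000 / 16666 + 1 = 2.
	 */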
8686 
8687 	params.triggers.cursor_update = true;
8688 	params.triggers.overlay_update = true;
8689 	params.triggers.surface_update = true;
8690 	params.num_frames = num_frames_static;
8691 
8692 	dc_stream_set_static_screen_params(link->ctx->dc,
8693 					   &stream, 1,
8694 					   &params);
8695 
8696 	return dc_link_set_psr_allow_active(link, true, false);
8697 }
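
/*
 * Usage sketch (guarded out): a hypothetical commit-path caller would only
 * arm PSR once the sink has advertised support, along these lines:
 */
#if 0
	if (stream->link->psr_feature_enabled) {
		/* program the PSR configuration, then let the firmware engage */
		amdgpu_dm_link_setup_psr(stream);
		amdgpu_dm_psr_enable(stream);
	}
#endif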
8698 
/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
8705 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");
8709 
8710 	return dc_link_set_psr_allow_active(stream->link, false, true);
8711 }
8712