1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
 * Initializes drm_device display-related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int disp_idx - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];
	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc_state->stream);
}
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
		if (acrtc_state->stream == NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
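		/*
		 * Pack into the legacy register layout: *position carries the
		 * vertical position in the low word and the horizontal
		 * position in the high word; *vbl carries the vblank start
		 * (low word) and end (high word).
		 */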
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
233 	}
234 
235 	return 0;
236 }
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
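		/* Unknown OTG instance: warn and fall back to the first CRTC. */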
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
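	/* Derive the OTG instance from the IRQ source index. */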
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
	/* An IRQ can arrive while we are still in the initial bring-up stage. */
	/* TODO: work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321 						 amdgpu_crtc->pflip_status,
322 						 AMDGPU_FLIP_SUBMITTED,
323 						 amdgpu_crtc->crtc_id,
324 						 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
	WARN_ON(!e);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
381 	/* Keep track of vblank of this flip for flip throttling. We use the
382 	 * cooked hw counter, as that one incremented at start of this vblank
383 	 * of pageflip completion, so last_flip_vblank is the forbidden count
384 	 * for queueing new pageflips if vsync + VRR is enabled.
385 	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
		/* Core vblank handling is done here after the end of the
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * also delivers any page-flip completion events that were
		 * queued to us if a pageflip happened inside the front-porch.
		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 
459 	if (acrtc) {
460 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
461 
462 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463 			      acrtc->crtc_id,
464 			      amdgpu_dm_vrr_active(acrtc_state));
465 
		/* Core vblank handling at the start of the front-porch is only
		 * possible in non-VRR mode, as only then does vblank
		 * timestamping give valid results while still inside the
		 * front-porch. Otherwise defer it to dm_vupdate_high_irq after
		 * the end of the front-porch.
		 */
471 		if (!amdgpu_dm_vrr_active(acrtc_state))
472 			drm_crtc_handle_vblank(&acrtc->base);
473 
		/* The following must happen at the start of vblank, for CRC
		 * computation and below-the-range (BTR) support in VRR mode.
		 */
477 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478 
479 		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480 		    acrtc_state->vrr_params.supported &&
481 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482 			spin_lock_irqsave(&adev->ddev->event_lock, flags);
483 			mod_freesync_handle_v_update(
484 				adev->dm.freesync_module,
485 				acrtc_state->stream,
486 				&acrtc_state->vrr_params);
487 
488 			dc_stream_adjust_vmin_vmax(
489 				adev->dm.dc,
490 				acrtc_state->stream,
491 				&acrtc_state->vrr_params.adjust);
492 			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493 		}
494 	}
495 }
496 
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514 	struct common_irq_params *irq_params = interrupt_params;
515 	struct amdgpu_device *adev = irq_params->adev;
516 	struct amdgpu_crtc *acrtc;
517 	struct dm_crtc_state *acrtc_state;
518 	unsigned long flags;
519 
520 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521 
522 	if (!acrtc)
523 		return;
524 
525 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
526 
527 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528 			 amdgpu_dm_vrr_active(acrtc_state),
529 			 acrtc_state->active_planes);
530 
531 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 	drm_crtc_handle_vblank(&acrtc->base);
533 
534 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
535 
536 	if (acrtc_state->vrr_params.supported &&
537 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);
542 
543 		dc_stream_adjust_vmin_vmax(
544 			adev->dm.dc,
545 			acrtc_state->stream,
546 			&acrtc_state->vrr_params.adjust);
547 	}
548 
549 	/*
	 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
551 	 * In that case, pageflip completion interrupts won't fire and pageflip
552 	 * completion events won't get delivered. Prevent this by sending
553 	 * pending pageflip events from here if a flip is still pending.
554 	 *
555 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
556 	 * avoid race conditions between flip programming and completion,
557 	 * which could cause too early flip completion events.
558 	 */
559 	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560 	    acrtc_state->active_planes == 0) {
561 		if (acrtc->event) {
562 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563 			acrtc->event = NULL;
564 			drm_crtc_vblank_put(&acrtc->base);
565 		}
566 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 	}
568 
569 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572 
573 static int dm_set_clockgating_state(void *handle,
574 		  enum amd_clockgating_state state)
575 {
576 	return 0;
577 }
578 
579 static int dm_set_powergating_state(void *handle,
580 		  enum amd_powergating_state state)
581 {
582 	return 0;
583 }
584 
585 /* Prototypes of private functions */
static int dm_early_init(void *handle);
587 
/* Allocate memory for FBC compressed data */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591 	struct drm_device *dev = connector->dev;
592 	struct amdgpu_device *adev = dev->dev_private;
593 	struct dm_comressor_info *compressor = &adev->dm.compressor;
594 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595 	struct drm_display_mode *mode;
596 	unsigned long max_size = 0;
597 
598 	if (adev->dm.dc->fbc_compressor == NULL)
599 		return;
600 
601 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602 		return;
603 
604 	if (compressor->bo_ptr)
605 		return;
606 
607 
608 	list_for_each_entry(mode, &connector->modes, head) {
609 		if (max_size < mode->htotal * mode->vtotal)
610 			max_size = mode->htotal * mode->vtotal;
611 	}
612 
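	/* Allocate enough for the largest mode, at 4 bytes per pixel. */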
613 	if (max_size) {
614 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616 			    &compressor->gpu_addr, &compressor->cpu_addr);
617 
618 		if (r)
619 			DRM_ERROR("DM: Failed to initialize FBC\n");
620 		else {
621 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623 		}
624 
625 	}
626 
627 }
628 
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630 					  int pipe, bool *enabled,
631 					  unsigned char *buf, int max_bytes)
632 {
633 	struct drm_device *dev = dev_get_drvdata(kdev);
634 	struct amdgpu_device *adev = dev->dev_private;
635 	struct drm_connector *connector;
636 	struct drm_connector_list_iter conn_iter;
637 	struct amdgpu_dm_connector *aconnector;
638 	int ret = 0;
639 
640 	*enabled = false;
641 
642 	mutex_lock(&adev->dm.audio_lock);
643 
644 	drm_connector_list_iter_begin(dev, &conn_iter);
645 	drm_for_each_connector_iter(connector, &conn_iter) {
646 		aconnector = to_amdgpu_dm_connector(connector);
647 		if (aconnector->audio_inst != port)
648 			continue;
649 
650 		*enabled = true;
651 		ret = drm_eld_size(connector->eld);
652 		memcpy(buf, connector->eld, min(max_bytes, ret));
653 
654 		break;
655 	}
656 	drm_connector_list_iter_end(&conn_iter);
657 
658 	mutex_unlock(&adev->dm.audio_lock);
659 
660 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 
662 	return ret;
663 }
664 
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666 	.get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668 
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670 				       struct device *hda_kdev, void *data)
671 {
672 	struct drm_device *dev = dev_get_drvdata(kdev);
673 	struct amdgpu_device *adev = dev->dev_private;
674 	struct drm_audio_component *acomp = data;
675 
676 	acomp->ops = &amdgpu_dm_audio_component_ops;
677 	acomp->dev = kdev;
678 	adev->dm.audio_component = acomp;
679 
680 	return 0;
681 }
682 
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684 					  struct device *hda_kdev, void *data)
685 {
686 	struct drm_device *dev = dev_get_drvdata(kdev);
687 	struct amdgpu_device *adev = dev->dev_private;
688 	struct drm_audio_component *acomp = data;
689 
690 	acomp->ops = NULL;
691 	acomp->dev = NULL;
692 	adev->dm.audio_component = NULL;
693 }
694 
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696 	.bind	= amdgpu_dm_audio_component_bind,
697 	.unbind	= amdgpu_dm_audio_component_unbind,
698 };
699 
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702 	int i, ret;
703 
704 	if (!amdgpu_audio)
705 		return 0;
706 
707 	adev->mode_info.audio.enabled = true;
708 
709 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710 
711 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712 		adev->mode_info.audio.pin[i].channels = -1;
713 		adev->mode_info.audio.pin[i].rate = -1;
714 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
715 		adev->mode_info.audio.pin[i].status_bits = 0;
716 		adev->mode_info.audio.pin[i].category_code = 0;
717 		adev->mode_info.audio.pin[i].connected = false;
718 		adev->mode_info.audio.pin[i].id =
719 			adev->dm.dc->res_pool->audios[i]->inst;
720 		adev->mode_info.audio.pin[i].offset = 0;
721 	}
722 
723 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 	if (ret < 0)
725 		return ret;
726 
727 	adev->dm.audio_registered = true;
728 
729 	return 0;
730 }
731 
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734 	if (!amdgpu_audio)
735 		return;
736 
737 	if (!adev->mode_info.audio.enabled)
738 		return;
739 
740 	if (adev->dm.audio_registered) {
741 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742 		adev->dm.audio_registered = false;
743 	}
744 
745 	/* TODO: Disable audio? */
746 
747 	adev->mode_info.audio.enabled = false;
748 }
749 
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752 	struct drm_audio_component *acomp = adev->dm.audio_component;
753 
754 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756 
757 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 						 pin, -1);
759 	}
760 }
761 
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764 	const struct dmcub_firmware_header_v1_0 *hdr;
765 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
768 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769 	struct abm *abm = adev->dm.dc->res_pool->abm;
770 	struct dmub_srv_hw_params hw_params;
771 	enum dmub_status status;
772 	const unsigned char *fw_inst_const, *fw_bss_data;
773 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 	bool has_hw_support;
775 
776 	if (!dmub_srv)
777 		/* DMUB isn't supported on the ASIC. */
778 		return 0;
779 
780 	if (!fb_info) {
781 		DRM_ERROR("No framebuffer info for DMUB service.\n");
782 		return -EINVAL;
783 	}
784 
785 	if (!dmub_fw) {
786 		/* Firmware required for DMUB support. */
787 		DRM_ERROR("No firmware provided for DMUB.\n");
788 		return -EINVAL;
789 	}
790 
791 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792 	if (status != DMUB_STATUS_OK) {
793 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 		return -EINVAL;
795 	}
796 
797 	if (!has_hw_support) {
798 		DRM_INFO("DMUB unsupported on ASIC\n");
799 		return 0;
800 	}
801 
802 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803 
804 	fw_inst_const = dmub_fw->data +
805 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 			PSP_HEADER_BYTES;
807 
808 	fw_bss_data = dmub_fw->data +
809 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810 		      le32_to_cpu(hdr->inst_const_bytes);
811 
812 	/* Copy firmware and bios info into FB memory. */
813 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815 
816 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
817 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
	 * DMUB firmware to cw0; otherwise, the firmware backdoor load
	 * is done here in dm_dmub_hw_init.
	 */
823 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 				fw_inst_const_size);
826 	}
827 
828 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
829 	       fw_bss_data_size);
830 
831 	/* Copy firmware bios info into FB memory. */
832 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
833 	       adev->bios_size);
834 
835 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
841 
842 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
843 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
844 
845 	/* Initialize hardware. */
846 	memset(&hw_params, 0, sizeof(hw_params));
847 	hw_params.fb_base = adev->gmc.fb_start;
848 	hw_params.fb_offset = adev->gmc.aper_base;
849 
850 	/* backdoor load firmware and trigger dmub running */
851 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
852 		hw_params.load_inst_const = true;
853 
854 	if (dmcu)
855 		hw_params.psp_version = dmcu->psp_version;
856 
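	/* Hand each framebuffer region computed during SW init to DMUB. */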
857 	for (i = 0; i < fb_info->num_fb; ++i)
858 		hw_params.fb[i] = &fb_info->fb[i];
859 
860 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
861 	if (status != DMUB_STATUS_OK) {
862 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 		return -EINVAL;
864 	}
865 
866 	/* Wait for firmware load to finish. */
867 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
868 	if (status != DMUB_STATUS_OK)
869 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
870 
871 	/* Init DMCU and ABM if available. */
872 	if (dmcu && abm) {
873 		dmcu->funcs->dmcu_init(dmcu);
874 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
875 	}
876 
877 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
878 	if (!adev->dm.dc->ctx->dmub_srv) {
879 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 		return -ENOMEM;
881 	}
882 
883 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
884 		 adev->dm.dmcub_fw_version);
885 
886 	return 0;
887 }
888 
889 static int amdgpu_dm_init(struct amdgpu_device *adev)
890 {
891 	struct dc_init_data init_data;
892 #ifdef CONFIG_DRM_AMD_DC_HDCP
893 	struct dc_callback_init init_params;
894 #endif
895 	int r;
896 
897 	adev->dm.ddev = adev->ddev;
898 	adev->dm.adev = adev;
899 
900 	/* Zero all the fields */
901 	memset(&init_data, 0, sizeof(init_data));
902 #ifdef CONFIG_DRM_AMD_DC_HDCP
903 	memset(&init_params, 0, sizeof(init_params));
904 #endif
905 
906 	mutex_init(&adev->dm.dc_lock);
907 	mutex_init(&adev->dm.audio_lock);
908 
	if (amdgpu_dm_irq_init(adev)) {
910 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
911 		goto error;
912 	}
913 
914 	init_data.asic_id.chip_family = adev->family;
915 
916 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
917 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
918 
919 	init_data.asic_id.vram_width = adev->gmc.vram_width;
920 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
921 	init_data.asic_id.atombios_base_address =
922 		adev->mode_info.atom_context->bios;
923 
924 	init_data.driver = adev;
925 
926 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
927 
928 	if (!adev->dm.cgs_device) {
929 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
930 		goto error;
931 	}
932 
933 	init_data.cgs_device = adev->dm.cgs_device;
934 
935 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
936 
937 	switch (adev->asic_type) {
938 	case CHIP_CARRIZO:
939 	case CHIP_STONEY:
940 	case CHIP_RAVEN:
941 	case CHIP_RENOIR:
942 		init_data.flags.gpu_vm_support = true;
943 		break;
944 	default:
945 		break;
946 	}
947 
948 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
949 		init_data.flags.fbc_support = true;
950 
951 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
952 		init_data.flags.multi_mon_pp_mclk_switch = true;
953 
954 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
955 		init_data.flags.disable_fractional_pwm = true;
956 
957 	init_data.flags.power_down_display_on_boot = true;
958 
959 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
960 
961 	/* Display Core create. */
962 	adev->dm.dc = dc_create(&init_data);
963 
964 	if (adev->dm.dc) {
965 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
966 	} else {
967 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
968 		goto error;
969 	}
970 
971 	r = dm_dmub_hw_init(adev);
972 	if (r) {
973 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
974 		goto error;
975 	}
976 
977 	dc_hardware_init(adev->dm.dc);
978 
979 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
986 
987 	amdgpu_dm_init_color_mod();
988 
989 #ifdef CONFIG_DRM_AMD_DC_HDCP
990 	if (adev->asic_type >= CHIP_RAVEN) {
991 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
992 
993 		if (!adev->dm.hdcp_workqueue)
994 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
995 		else
996 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
997 
998 		dc_init_callbacks(adev->dm.dc, &init_params);
999 	}
1000 #endif
1001 	if (amdgpu_dm_initialize_drm_device(adev)) {
1002 		DRM_ERROR(
1003 		"amdgpu: failed to initialize sw for display support.\n");
1004 		goto error;
1005 	}
1006 
1007 	/* Update the actual used number of crtc */
1008 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1009 
1010 	/* TODO: Add_display_info? */
1011 
1012 	/* TODO use dynamic cursor width */
1013 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1014 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1015 
	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}
1020 	}
1021 
1022 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1023 
1024 	return 0;
1025 error:
1026 	amdgpu_dm_fini(adev);
1027 
1028 	return -EINVAL;
1029 }
1030 
1031 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1032 {
1033 	amdgpu_dm_audio_fini(adev);
1034 
1035 	amdgpu_dm_destroy_drm_device(&adev->dm);
1036 
1037 #ifdef CONFIG_DRM_AMD_DC_HDCP
1038 	if (adev->dm.hdcp_workqueue) {
1039 		hdcp_destroy(adev->dm.hdcp_workqueue);
1040 		adev->dm.hdcp_workqueue = NULL;
1041 	}
1042 
1043 	if (adev->dm.dc)
1044 		dc_deinit_callbacks(adev->dm.dc);
1045 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1047 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1048 		adev->dm.dc->ctx->dmub_srv = NULL;
1049 	}
1050 
1051 	if (adev->dm.dmub_bo)
1052 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1053 				      &adev->dm.dmub_bo_gpu_addr,
1054 				      &adev->dm.dmub_bo_cpu_addr);
1055 
1056 	/* DC Destroy TODO: Replace destroy DAL */
1057 	if (adev->dm.dc)
1058 		dc_destroy(&adev->dm.dc);
1059 	/*
	 * TODO: pageflip, vblank interrupt
1061 	 *
1062 	 * amdgpu_dm_irq_fini(adev);
1063 	 */
1064 
1065 	if (adev->dm.cgs_device) {
1066 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1067 		adev->dm.cgs_device = NULL;
1068 	}
1069 	if (adev->dm.freesync_module) {
1070 		mod_freesync_destroy(adev->dm.freesync_module);
1071 		adev->dm.freesync_module = NULL;
1072 	}
1073 
1074 	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
1078 }
1079 
1080 static int load_dmcu_fw(struct amdgpu_device *adev)
1081 {
1082 	const char *fw_name_dmcu = NULL;
1083 	int r;
1084 	const struct dmcu_firmware_header_v1_0 *hdr;
1085 
	switch (adev->asic_type) {
1087 	case CHIP_BONAIRE:
1088 	case CHIP_HAWAII:
1089 	case CHIP_KAVERI:
1090 	case CHIP_KABINI:
1091 	case CHIP_MULLINS:
1092 	case CHIP_TONGA:
1093 	case CHIP_FIJI:
1094 	case CHIP_CARRIZO:
1095 	case CHIP_STONEY:
1096 	case CHIP_POLARIS11:
1097 	case CHIP_POLARIS10:
1098 	case CHIP_POLARIS12:
1099 	case CHIP_VEGAM:
1100 	case CHIP_VEGA10:
1101 	case CHIP_VEGA12:
1102 	case CHIP_VEGA20:
1103 	case CHIP_NAVI10:
1104 	case CHIP_NAVI14:
1105 	case CHIP_RENOIR:
1106 		return 0;
1107 	case CHIP_NAVI12:
1108 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1109 		break;
1110 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
1117 		break;
1118 	default:
1119 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1124 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1125 		return 0;
1126 	}
1127 
1128 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1129 	if (r == -ENOENT) {
1130 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1131 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1132 		adev->dm.fw_dmcu = NULL;
1133 		return 0;
1134 	}
1135 	if (r) {
1136 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1137 			fw_name_dmcu);
1138 		return r;
1139 	}
1140 
1141 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1142 	if (r) {
1143 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1144 			fw_name_dmcu);
1145 		release_firmware(adev->dm.fw_dmcu);
1146 		adev->dm.fw_dmcu = NULL;
1147 		return r;
1148 	}
1149 
1150 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
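	/* The DMCU image carries two separately loaded pieces: the ERAM
	 * program and the interrupt vectors (intv); register both with the
	 * PSP loader.
	 */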
1151 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1152 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1153 	adev->firmware.fw_size +=
1154 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1155 
1156 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1157 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1158 	adev->firmware.fw_size +=
1159 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1160 
1161 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1162 
1163 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1164 
1165 	return 0;
1166 }
1167 
1168 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1169 {
1170 	struct amdgpu_device *adev = ctx;
1171 
1172 	return dm_read_reg(adev->dm.dc->ctx, address);
1173 }
1174 
1175 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1176 				     uint32_t value)
1177 {
1178 	struct amdgpu_device *adev = ctx;
1179 
1180 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1181 }
1182 
1183 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1184 {
1185 	struct dmub_srv_create_params create_params;
1186 	struct dmub_srv_region_params region_params;
1187 	struct dmub_srv_region_info region_info;
1188 	struct dmub_srv_fb_params fb_params;
1189 	struct dmub_srv_fb_info *fb_info;
1190 	struct dmub_srv *dmub_srv;
1191 	const struct dmcub_firmware_header_v1_0 *hdr;
1192 	const char *fw_name_dmub;
1193 	enum dmub_asic dmub_asic;
1194 	enum dmub_status status;
1195 	int r;
1196 
1197 	switch (adev->asic_type) {
1198 	case CHIP_RENOIR:
1199 		dmub_asic = DMUB_ASIC_DCN21;
1200 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1201 		break;
1202 
1203 	default:
1204 		/* ASIC doesn't support DMUB. */
1205 		return 0;
1206 	}
1207 
1208 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1209 	if (r) {
1210 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1211 		return 0;
1212 	}
1213 
1214 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1215 	if (r) {
1216 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1217 		return 0;
1218 	}
1219 
1220 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1221 
	/* Read the firmware version up front so it can be logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1235 
1236 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1237 	dmub_srv = adev->dm.dmub_srv;
1238 
1239 	if (!dmub_srv) {
1240 		DRM_ERROR("Failed to allocate DMUB service!\n");
1241 		return -ENOMEM;
1242 	}
1243 
1244 	memset(&create_params, 0, sizeof(create_params));
1245 	create_params.user_ctx = adev;
1246 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1247 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1248 	create_params.asic = dmub_asic;
1249 
1250 	/* Create the DMUB service. */
1251 	status = dmub_srv_create(dmub_srv, &create_params);
1252 	if (status != DMUB_STATUS_OK) {
1253 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1254 		return -EINVAL;
1255 	}
1256 
1257 	/* Calculate the size of all the regions for the DMUB service. */
1258 	memset(&region_params, 0, sizeof(region_params));
1259 
1260 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1261 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1262 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1263 	region_params.vbios_size = adev->bios_size;
1264 	region_params.fw_bss_data =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		le32_to_cpu(hdr->inst_const_bytes);
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	if (adev->dm.dmub_fw) {
1338 		release_firmware(adev->dm.dmub_fw);
1339 		adev->dm.dmub_fw = NULL;
1340 	}
1341 
	if (adev->dm.fw_dmcu) {
1343 		release_firmware(adev->dm.fw_dmcu);
1344 		adev->dm.fw_dmcu = NULL;
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1351 {
1352 	struct amdgpu_dm_connector *aconnector;
1353 	struct drm_connector *connector;
1354 	struct drm_connector_list_iter iter;
1355 	int ret = 0;
1356 
1357 	drm_connector_list_iter_begin(dev, &iter);
1358 	drm_for_each_connector_iter(connector, &iter) {
1359 		aconnector = to_amdgpu_dm_connector(connector);
1360 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1361 		    aconnector->mst_mgr.aux) {
1362 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1363 					 aconnector,
1364 					 aconnector->base.base.id);
1365 
1366 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1367 			if (ret < 0) {
1368 				DRM_ERROR("DM_MST: Failed to start MST\n");
1369 				aconnector->dc_link->type =
1370 					dc_connection_single;
1371 				break;
1372 			}
1373 		}
1374 	}
1375 	drm_connector_list_iter_end(&iter);
1376 
1377 	return ret;
1378 }
1379 
1380 static int dm_late_init(void *handle)
1381 {
1382 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1383 
1384 	struct dmcu_iram_parameters params;
1385 	unsigned int linear_lut[16];
1386 	int i;
1387 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1388 	bool ret = false;
1389 
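	/* Build a 16-entry linear (identity) backlight LUT spanning 0..0xFFFF. */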
1390 	for (i = 0; i < 16; i++)
1391 		linear_lut[i] = 0xFFFF * i / 15;
1392 
1393 	params.set = 0;
1394 	params.backlight_ramping_start = 0xCCCC;
1395 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1396 	params.backlight_lut_array_size = 16;
1397 	params.backlight_lut_array = linear_lut;
1398 
	/* Min backlight level after ABM reduction; don't allow below 1%
	 * (0xFFFF * 0.01 = 0x28F).
	 */
1402 	params.min_abm_backlight = 0x28F;
1403 
	/* TODO: will enable for navi10 */
1405 	if (adev->asic_type <= CHIP_RAVEN) {
1406 		ret = dmcu_load_iram(dmcu, params);
1407 
1408 		if (!ret)
1409 			return -EINVAL;
1410 	}
1411 
1412 	return detect_mst_link_for_all_connectors(adev->ddev);
1413 }
1414 
1415 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1416 {
1417 	struct amdgpu_dm_connector *aconnector;
1418 	struct drm_connector *connector;
1419 	struct drm_connector_list_iter iter;
1420 	struct drm_dp_mst_topology_mgr *mgr;
1421 	int ret;
1422 	bool need_hotplug = false;
1423 
1424 	drm_connector_list_iter_begin(dev, &iter);
1425 	drm_for_each_connector_iter(connector, &iter) {
1426 		aconnector = to_amdgpu_dm_connector(connector);
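		/* Connectors with an mst_port hang off an MST branch; only the
		 * root topology manager needs suspend/resume handling.
		 */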
1427 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1428 		    aconnector->mst_port)
1429 			continue;
1430 
1431 		mgr = &aconnector->mst_mgr;
1432 
1433 		if (suspend) {
1434 			drm_dp_mst_topology_mgr_suspend(mgr);
1435 		} else {
1436 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1437 			if (ret < 0) {
1438 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1439 				need_hotplug = true;
1440 			}
1441 		}
1442 	}
1443 	drm_connector_list_iter_end(&iter);
1444 
1445 	if (need_hotplug)
1446 		drm_kms_helper_hotplug_event(dev);
1447 }
1448 
1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451 	struct smu_context *smu = &adev->smu;
1452 	int ret = 0;
1453 
1454 	if (!is_support_sw_smu(adev))
1455 		return 0;
1456 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's DC implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to SMU during boot up and resume from S3.
	 * Boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to Navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1488 	case CHIP_NAVI10:
1489 	case CHIP_NAVI14:
1490 	case CHIP_NAVI12:
1491 		break;
1492 	default:
1493 		return 0;
1494 	}
1495 
1496 	mutex_lock(&smu->mutex);
1497 
1498 	/* pass data to smu controller */
1499 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1500 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1501 		ret = smu_write_watermarks_table(smu);
1502 
1503 		if (ret) {
1504 			mutex_unlock(&smu->mutex);
1505 			DRM_ERROR("Failed to update WMTABLE!\n");
1506 			return ret;
1507 		}
1508 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1509 	}
1510 
1511 	mutex_unlock(&smu->mutex);
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539 	/* Create DAL display manager */
1540 	amdgpu_dm_init(adev);
1541 	amdgpu_dm_hpd_init(adev);
1542 
1543 	return 0;
1544 }
1545 
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557 
1558 	amdgpu_dm_hpd_fini(adev);
1559 
1560 	amdgpu_dm_irq_fini(adev);
1561 	amdgpu_dm_fini(adev);
1562 	return 0;
1563 }
1564 
1565 static int dm_suspend(void *handle)
1566 {
1567 	struct amdgpu_device *adev = handle;
1568 	struct amdgpu_display_manager *dm = &adev->dm;
1569 	int ret = 0;
1570 
1571 	WARN_ON(adev->dm.cached_state);
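	/* Cache the atomic state so dm_resume() can restore it. */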
1572 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1573 
1574 	s3_handle_mst(adev->ddev, true);
1575 
1576 	amdgpu_dm_irq_suspend(adev);
1577 
1578 
1579 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1580 
1581 	return ret;
1582 }
1583 
1584 static struct amdgpu_dm_connector *
1585 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1586 					     struct drm_crtc *crtc)
1587 {
1588 	uint32_t i;
1589 	struct drm_connector_state *new_con_state;
1590 	struct drm_connector *connector;
1591 	struct drm_crtc *crtc_from_state;
1592 
1593 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1594 		crtc_from_state = new_con_state->crtc;
1595 
1596 		if (crtc_from_state == crtc)
1597 			return to_amdgpu_dm_connector(connector);
1598 	}
1599 
1600 	return NULL;
1601 }
1602 
1603 static void emulated_link_detect(struct dc_link *link)
1604 {
1605 	struct dc_sink_init_data sink_init_data = { 0 };
1606 	struct display_sink_capability sink_caps = { 0 };
1607 	enum dc_edid_status edid_status;
1608 	struct dc_context *dc_ctx = link->ctx;
1609 	struct dc_sink *sink = NULL;
1610 	struct dc_sink *prev_sink = NULL;
1611 
1612 	link->type = dc_connection_none;
1613 	prev_sink = link->local_sink;
1614 
1615 	if (prev_sink != NULL)
1616 		dc_sink_retain(prev_sink);
1617 
1618 	switch (link->connector_signal) {
1619 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1620 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1621 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1622 		break;
1623 	}
1624 
1625 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1626 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1627 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1628 		break;
1629 	}
1630 
1631 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1632 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1633 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1634 		break;
1635 	}
1636 
1637 	case SIGNAL_TYPE_LVDS: {
1638 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1639 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1640 		break;
1641 	}
1642 
1643 	case SIGNAL_TYPE_EDP: {
1644 		sink_caps.transaction_type =
1645 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1646 		sink_caps.signal = SIGNAL_TYPE_EDP;
1647 		break;
1648 	}
1649 
1650 	case SIGNAL_TYPE_DISPLAY_PORT: {
1651 		sink_caps.transaction_type =
1652 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
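		/* Note: emulated DP sinks are reported with a virtual signal
		 * type, presumably since no physical link is trained here.
		 */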
1653 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1654 		break;
1655 	}
1656 
1657 	default:
1658 		DC_ERROR("Invalid connector type! signal:%d\n",
1659 			link->connector_signal);
1660 		return;
1661 	}
1662 
1663 	sink_init_data.link = link;
1664 	sink_init_data.sink_signal = sink_caps.signal;
1665 
1666 	sink = dc_sink_create(&sink_init_data);
1667 	if (!sink) {
1668 		DC_ERROR("Failed to create sink!\n");
1669 		return;
1670 	}
1671 
1672 	/* dc_sink_create returns a new reference */
1673 	link->local_sink = sink;
1674 
1675 	edid_status = dm_helpers_read_local_edid(
1676 			link->ctx,
1677 			link,
1678 			sink);
1679 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
1683 }
1684 
1685 static int dm_resume(void *handle)
1686 {
1687 	struct amdgpu_device *adev = handle;
1688 	struct drm_device *ddev = adev->ddev;
1689 	struct amdgpu_display_manager *dm = &adev->dm;
1690 	struct amdgpu_dm_connector *aconnector;
1691 	struct drm_connector *connector;
1692 	struct drm_connector_list_iter iter;
1693 	struct drm_crtc *crtc;
1694 	struct drm_crtc_state *new_crtc_state;
1695 	struct dm_crtc_state *dm_new_crtc_state;
1696 	struct drm_plane *plane;
1697 	struct drm_plane_state *new_plane_state;
1698 	struct dm_plane_state *dm_new_plane_state;
1699 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1700 	enum dc_connection_type new_connection_type = dc_connection_none;
1701 	int i, r;
1702 
1703 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1704 	dc_release_state(dm_state->context);
1705 	dm_state->context = dc_create_state(dm->dc);
1706 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1707 	dc_resource_state_construct(dm->dc, dm_state->context);
1708 
1709 	/* Before powering on DC we need to re-initialize DMUB. */
1710 	r = dm_dmub_hw_init(adev);
1711 	if (r)
1712 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1713 
1714 	/* power on hardware */
1715 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1716 
1717 	/* program HPD filter */
1718 	dc_resume(dm->dc);
1719 
1720 	/*
	 * Early enable HPD Rx IRQ; this should be done before set mode, as
	 * short-pulse interrupts are used for MST.
1723 	 */
1724 	amdgpu_dm_irq_resume_early(adev);
1725 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
1727 	s3_handle_mst(ddev, false);
1728 
	/* Do detection */
1730 	drm_connector_list_iter_begin(ddev, &iter);
1731 	drm_for_each_connector_iter(connector, &iter) {
1732 		aconnector = to_amdgpu_dm_connector(connector);
1733 
1734 		/*
		 * This is the case when traversing through already-created
		 * MST connectors; they should be skipped.
1737 		 */
1738 		if (aconnector->mst_port)
1739 			continue;
1740 
1741 		mutex_lock(&aconnector->hpd_lock);
1742 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1743 			DRM_ERROR("KMS: Failed to detect connector\n");
1744 
1745 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1746 			emulated_link_detect(aconnector->dc_link);
1747 		else
1748 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1749 
1750 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1751 			aconnector->fake_enable = false;
1752 
1753 		if (aconnector->dc_sink)
1754 			dc_sink_release(aconnector->dc_sink);
1755 		aconnector->dc_sink = NULL;
1756 		amdgpu_dm_update_connector_after_detect(aconnector);
1757 		mutex_unlock(&aconnector->hpd_lock);
1758 	}
1759 	drm_connector_list_iter_end(&iter);
1760 
1761 	/* Force mode set in atomic commit */
1762 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1763 		new_crtc_state->active_changed = true;
1764 
1765 	/*
1766 	 * atomic_check is expected to create the dc states. We need to release
1767 	 * them here, since they were duplicated as part of the suspend
1768 	 * procedure.
1769 	 */
1770 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1771 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1772 		if (dm_new_crtc_state->stream) {
1773 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1774 			dc_stream_release(dm_new_crtc_state->stream);
1775 			dm_new_crtc_state->stream = NULL;
1776 		}
1777 	}
1778 
1779 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1780 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1781 		if (dm_new_plane_state->dc_state) {
1782 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1783 			dc_plane_state_release(dm_new_plane_state->dc_state);
1784 			dm_new_plane_state->dc_state = NULL;
1785 		}
1786 	}
1787 
1788 	drm_atomic_helper_resume(ddev, dm->cached_state);
1789 
1790 	dm->cached_state = NULL;
1791 
1792 	amdgpu_dm_irq_resume_late(adev);
1793 
1794 	amdgpu_dm_smu_write_watermarks_table(adev);
1795 
1796 	return 0;
1797 }
1798 
1799 /**
1800  * DOC: DM Lifecycle
1801  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1803  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1804  * the base driver's device list to be initialized and torn down accordingly.
1805  *
1806  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1807  */
1808 
1809 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1810 	.name = "dm",
1811 	.early_init = dm_early_init,
1812 	.late_init = dm_late_init,
1813 	.sw_init = dm_sw_init,
1814 	.sw_fini = dm_sw_fini,
1815 	.hw_init = dm_hw_init,
1816 	.hw_fini = dm_hw_fini,
1817 	.suspend = dm_suspend,
1818 	.resume = dm_resume,
1819 	.is_idle = dm_is_idle,
1820 	.wait_for_idle = dm_wait_for_idle,
1821 	.check_soft_reset = dm_check_soft_reset,
1822 	.soft_reset = dm_soft_reset,
1823 	.set_clockgating_state = dm_set_clockgating_state,
1824 	.set_powergating_state = dm_set_powergating_state,
1825 };
1826 
1827 const struct amdgpu_ip_block_version dm_ip_block =
1828 {
1829 	.type = AMD_IP_BLOCK_TYPE_DCE,
1830 	.major = 1,
1831 	.minor = 0,
1832 	.rev = 0,
1833 	.funcs = &amdgpu_dm_funcs,
1834 };
1835 
1836 
1837 /**
1838  * DOC: atomic
1839  *
1840  * *WIP*
1841  */
1842 
1843 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1844 	.fb_create = amdgpu_display_user_framebuffer_create,
1845 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1846 	.atomic_check = amdgpu_dm_atomic_check,
1847 	.atomic_commit = amdgpu_dm_atomic_commit,
1848 };
1849 
1850 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1851 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1852 };
1853 
1854 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1855 {
1856 	u32 max_cll, min_cll, max, min, q, r;
1857 	struct amdgpu_dm_backlight_caps *caps;
1858 	struct amdgpu_display_manager *dm;
1859 	struct drm_connector *conn_base;
1860 	struct amdgpu_device *adev;
1861 	static const u8 pre_computed_values[] = {
1862 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1863 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1864 
1865 	if (!aconnector || !aconnector->dc_link)
1866 		return;
1867 
1868 	conn_base = &aconnector->base;
1869 	adev = conn_base->dev->dev_private;
1870 	dm = &adev->dm;
1871 	caps = &dm->backlight_caps;
1872 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1873 	caps->aux_support = false;
1874 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1875 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1876 
1877 	if (caps->ext_caps->bits.oled == 1 ||
1878 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1879 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1880 		caps->aux_support = true;
1881 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression requires floating-point precision; to
	 * avoid that complexity, we exploit the fact that CV is divided by a
	 * constant. By Euclid's division algorithm, CV can be written as
	 * CV = 32*q + r. Substituting into the luminance expression gives
	 * 50*(2**q)*(2**(r/32)), so only the values of 50*2**(r/32) need to
	 * be pre-computed. They were generated with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results are stored in pre_computed_values.
	 */
1897 	q = max_cll >> 5;
1898 	r = max_cll % 32;
1899 	max = (1 << q) * pre_computed_values[r];
1900 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
1902 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1903 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
1904 
1905 	caps->aux_max_input_signal = max;
1906 	caps->aux_min_input_signal = min;
1907 }
1908 
1909 void amdgpu_dm_update_connector_after_detect(
1910 		struct amdgpu_dm_connector *aconnector)
1911 {
1912 	struct drm_connector *connector = &aconnector->base;
1913 	struct drm_device *dev = connector->dev;
1914 	struct dc_sink *sink;
1915 
	/* MST connectors are handled by the drm_dp_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

1921 	sink = aconnector->dc_link->local_sink;
1922 	if (sink)
1923 		dc_sink_retain(sink);
1924 
1925 	/*
1926 	 * Edid mgmt connector gets first update only in mode_valid hook and then
1927 	 * the connector sink is set to either fake or physical sink depends on link status.
1928 	 * Skip if already done during boot.
1929 	 */
1930 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1931 			&& aconnector->dc_em_sink) {
1932 
1933 		/*
1934 		 * For S3 resume with headless use eml_sink to fake stream
1935 		 * because on resume connector->sink is set to NULL
1936 		 */
1937 		mutex_lock(&dev->mode_config.mutex);
1938 
1939 		if (sink) {
1940 			if (aconnector->dc_sink) {
1941 				amdgpu_dm_update_freesync_caps(connector, NULL);
1942 				/*
1943 				 * retain and release below are used to
1944 				 * bump up refcount for sink because the link doesn't point
1945 				 * to it anymore after disconnect, so on next crtc to connector
1946 				 * reshuffle by UMD we will get into unwanted dc_sink release
1947 				 */
1948 				dc_sink_release(aconnector->dc_sink);
1949 			}
1950 			aconnector->dc_sink = sink;
1951 			dc_sink_retain(aconnector->dc_sink);
1952 			amdgpu_dm_update_freesync_caps(connector,
1953 					aconnector->edid);
1954 		} else {
1955 			amdgpu_dm_update_freesync_caps(connector, NULL);
1956 			if (!aconnector->dc_sink) {
1957 				aconnector->dc_sink = aconnector->dc_em_sink;
1958 				dc_sink_retain(aconnector->dc_sink);
1959 			}
1960 		}
1961 
1962 		mutex_unlock(&dev->mode_config.mutex);
1963 
1964 		if (sink)
1965 			dc_sink_release(sink);
1966 		return;
1967 	}
1968 
1969 	/*
1970 	 * TODO: temporary guard to look for proper fix
1971 	 * if this sink is MST sink, we should not do anything
1972 	 */
1973 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1974 		dc_sink_release(sink);
1975 		return;
1976 	}
1977 
1978 	if (aconnector->dc_sink == sink) {
1979 		/*
1980 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1981 		 * Do nothing!!
1982 		 */
1983 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1984 				aconnector->connector_id);
1985 		if (sink)
1986 			dc_sink_release(sink);
1987 		return;
1988 	}
1989 
1990 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1991 		aconnector->connector_id, aconnector->dc_sink, sink);
1992 
1993 	mutex_lock(&dev->mode_config.mutex);
1994 
1995 	/*
1996 	 * 1. Update status of the drm connector
1997 	 * 2. Send an event and let userspace tell us what to do
1998 	 */
1999 	if (sink) {
2000 		/*
2001 		 * TODO: check if we still need the S3 mode update workaround.
2002 		 * If yes, put it here.
2003 		 */
2004 		if (aconnector->dc_sink)
2005 			amdgpu_dm_update_freesync_caps(connector, NULL);
2006 
2007 		aconnector->dc_sink = sink;
2008 		dc_sink_retain(aconnector->dc_sink);
2009 		if (sink->dc_edid.length == 0) {
2010 			aconnector->edid = NULL;
2011 			if (aconnector->dc_link->aux_mode) {
2012 				drm_dp_cec_unset_edid(
2013 					&aconnector->dm_dp_aux.aux);
2014 			}
2015 		} else {
2016 			aconnector->edid =
2017 				(struct edid *)sink->dc_edid.raw_edid;
2018 
2019 			drm_connector_update_edid_property(connector,
2020 							   aconnector->edid);
2021 
2022 			if (aconnector->dc_link->aux_mode)
2023 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2024 						    aconnector->edid);
2025 		}
2026 
2027 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2028 		update_connector_ext_caps(aconnector);
2029 	} else {
2030 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2031 		amdgpu_dm_update_freesync_caps(connector, NULL);
2032 		drm_connector_update_edid_property(connector, NULL);
2033 		aconnector->num_modes = 0;
2034 		dc_sink_release(aconnector->dc_sink);
2035 		aconnector->dc_sink = NULL;
2036 		aconnector->edid = NULL;
2037 #ifdef CONFIG_DRM_AMD_DC_HDCP
2038 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2041 #endif
2042 	}
2043 
2044 	mutex_unlock(&dev->mode_config.mutex);
2045 
2046 	if (sink)
2047 		dc_sink_release(sink);
2048 }
2049 
2050 static void handle_hpd_irq(void *param)
2051 {
2052 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2053 	struct drm_connector *connector = &aconnector->base;
2054 	struct drm_device *dev = connector->dev;
2055 	enum dc_connection_type new_connection_type = dc_connection_none;
2056 #ifdef CONFIG_DRM_AMD_DC_HDCP
2057 	struct amdgpu_device *adev = dev->dev_private;
2058 #endif
2059 
2060 	/*
2061 	 * In case of failure or MST no need to update connector status or notify the OS
2062 	 * since (for MST case) MST does this in its own context.
2063 	 */
2064 	mutex_lock(&aconnector->hpd_lock);
2065 
2066 #ifdef CONFIG_DRM_AMD_DC_HDCP
2067 	if (adev->dm.hdcp_workqueue)
2068 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2069 #endif
2070 	if (aconnector->fake_enable)
2071 		aconnector->fake_enable = false;
2072 
2073 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074 		DRM_ERROR("KMS: Failed to detect connector\n");
2075 
2076 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077 		emulated_link_detect(aconnector->dc_link);
2080 		drm_modeset_lock_all(dev);
2081 		dm_restore_drm_connector_state(dev, connector);
2082 		drm_modeset_unlock_all(dev);
2083 
2084 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085 			drm_kms_helper_hotplug_event(dev);
2086 
2087 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2088 		amdgpu_dm_update_connector_after_detect(aconnector);
2091 		drm_modeset_lock_all(dev);
2092 		dm_restore_drm_connector_state(dev, connector);
2093 		drm_modeset_unlock_all(dev);
2094 
2095 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096 			drm_kms_helper_hotplug_event(dev);
2097 	}
	mutex_unlock(&aconnector->hpd_lock);
}
2101 
2102 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2103 {
2104 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2105 	uint8_t dret;
2106 	bool new_irq_handled = false;
2107 	int dpcd_addr;
2108 	int dpcd_bytes_to_read;
2109 
2110 	const int max_process_count = 30;
2111 	int process_count = 0;
2112 
2113 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2114 
2115 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2118 		dpcd_addr = DP_SINK_COUNT;
2119 	} else {
2120 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122 		dpcd_addr = DP_SINK_COUNT_ESI;
2123 	}
2124 
2125 	dret = drm_dp_dpcd_read(
2126 		&aconnector->dm_dp_aux.aux,
2127 		dpcd_addr,
2128 		esi,
2129 		dpcd_bytes_to_read);
2130 
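	/*
	 * Keep servicing the ESI: as long as the MST manager handles a new
	 * IRQ, ACK it and re-read, up to max_process_count iterations.
	 */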
2131 	while (dret == dpcd_bytes_to_read &&
2132 		process_count < max_process_count) {
		uint8_t retry;

		dret = 0;
2135 
2136 		process_count++;
2137 
2138 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2139 		/* handle HPD short pulse irq */
2140 		if (aconnector->mst_mgr.mst_state)
2141 			drm_dp_mst_hpd_irq(
2142 				&aconnector->mst_mgr,
2143 				esi,
2144 				&new_irq_handled);
2145 
2146 		if (new_irq_handled) {
			/* ACK at DPCD to notify the downstream device */
2148 			const int ack_dpcd_bytes_to_write =
2149 				dpcd_bytes_to_read - 1;
2150 
2151 			for (retry = 0; retry < 3; retry++) {
2152 				uint8_t wret;
2153 
2154 				wret = drm_dp_dpcd_write(
2155 					&aconnector->dm_dp_aux.aux,
2156 					dpcd_addr + 1,
2157 					&esi[1],
2158 					ack_dpcd_bytes_to_write);
2159 				if (wret == ack_dpcd_bytes_to_write)
2160 					break;
2161 			}
2162 
2163 			/* check if there is new irq to be handled */
2164 			dret = drm_dp_dpcd_read(
2165 				&aconnector->dm_dp_aux.aux,
2166 				dpcd_addr,
2167 				esi,
2168 				dpcd_bytes_to_read);
2169 
2170 			new_irq_handled = false;
2171 		} else {
2172 			break;
2173 		}
2174 	}
2175 
2176 	if (process_count == max_process_count)
2177 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2178 }
2179 
2180 static void handle_hpd_rx_irq(void *param)
2181 {
2182 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2183 	struct drm_connector *connector = &aconnector->base;
2184 	struct drm_device *dev = connector->dev;
2185 	struct dc_link *dc_link = aconnector->dc_link;
2186 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2187 	enum dc_connection_type new_connection_type = dc_connection_none;
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189 	union hpd_irq_data hpd_irq_data;
2190 	struct amdgpu_device *adev = dev->dev_private;
2191 
2192 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2193 #endif
2194 
2195 	/*
2196 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2197 	 * conflict, after implement i2c helper, this mutex should be
2198 	 * retired.
2199 	 */
2200 	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

2204 #ifdef CONFIG_DRM_AMD_DC_HDCP
2205 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2206 #else
2207 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2208 #endif
2209 			!is_mst_root_connector) {
2210 		/* Downstream Port status changed. */
2211 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212 			DRM_ERROR("KMS: Failed to detect connector\n");
2213 
2214 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215 			emulated_link_detect(dc_link);
2216 
2217 			if (aconnector->fake_enable)
2218 				aconnector->fake_enable = false;
2219 
			amdgpu_dm_update_connector_after_detect(aconnector);

2223 			drm_modeset_lock_all(dev);
2224 			dm_restore_drm_connector_state(dev, connector);
2225 			drm_modeset_unlock_all(dev);
2226 
2227 			drm_kms_helper_hotplug_event(dev);
2228 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2229 
2230 			if (aconnector->fake_enable)
2231 				aconnector->fake_enable = false;
2232 
			amdgpu_dm_update_connector_after_detect(aconnector);

2236 			drm_modeset_lock_all(dev);
2237 			dm_restore_drm_connector_state(dev, connector);
2238 			drm_modeset_unlock_all(dev);
2239 
2240 			drm_kms_helper_hotplug_event(dev);
2241 		}
2242 	}
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2247 	}
2248 #endif
2249 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2250 	    (dc_link->type == dc_connection_mst_branch))
2251 		dm_handle_hpd_rx_irq(aconnector);
2252 
2253 	if (dc_link->type != dc_connection_mst_branch) {
2254 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2255 		mutex_unlock(&aconnector->hpd_lock);
2256 	}
2257 }
2258 
2259 static void register_hpd_handlers(struct amdgpu_device *adev)
2260 {
2261 	struct drm_device *dev = adev->ddev;
2262 	struct drm_connector *connector;
2263 	struct amdgpu_dm_connector *aconnector;
2264 	const struct dc_link *dc_link;
2265 	struct dc_interrupt_params int_params = {0};
2266 
2267 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2269 
2270 	list_for_each_entry(connector,
2271 			&dev->mode_config.connector_list, head)	{
2272 
2273 		aconnector = to_amdgpu_dm_connector(connector);
2274 		dc_link = aconnector->dc_link;
2275 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2277 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278 			int_params.irq_source = dc_link->irq_source_hpd;
2279 
2280 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2281 					handle_hpd_irq,
2282 					(void *) aconnector);
2283 		}
2284 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2290 
2291 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 					handle_hpd_rx_irq,
2293 					(void *) aconnector);
2294 		}
2295 	}
2296 }
2297 
2298 /* Register IRQ sources and initialize IRQ callbacks */
2299 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2300 {
2301 	struct dc *dc = adev->dm.dc;
2302 	struct common_irq_params *c_irq_params;
2303 	struct dc_interrupt_params int_params = {0};
2304 	int r;
2305 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2307 
2308 	if (adev->asic_type >= CHIP_VEGA10)
2309 		client_id = SOC15_IH_CLIENTID_DCE;
2310 
2311 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2313 
2314 	/*
2315 	 * Actions of amdgpu_irq_add_id():
2316 	 * 1. Register a set() function with base driver.
2317 	 *    Base driver will call set() function to enable/disable an
2318 	 *    interrupt in DC hardware.
2319 	 * 2. Register amdgpu_dm_irq_handler().
2320 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321 	 *    coming from DC hardware.
2322 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323 	 *    for acknowledging and handling. */
2324 
2325 	/* Use VBLANK interrupt */
2326 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2327 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2328 		if (r) {
2329 			DRM_ERROR("Failed to add crtc irq id!\n");
2330 			return r;
2331 		}
2332 
2333 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334 		int_params.irq_source =
2335 			dc_interrupt_to_irq_source(dc, i, 0);
2336 
2337 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2338 
2339 		c_irq_params->adev = adev;
2340 		c_irq_params->irq_src = int_params.irq_source;
2341 
2342 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343 				dm_crtc_high_irq, c_irq_params);
2344 	}
2345 
2346 	/* Use VUPDATE interrupt */
2347 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2349 		if (r) {
2350 			DRM_ERROR("Failed to add vupdate irq id!\n");
2351 			return r;
2352 		}
2353 
2354 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355 		int_params.irq_source =
2356 			dc_interrupt_to_irq_source(dc, i, 0);
2357 
2358 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2359 
2360 		c_irq_params->adev = adev;
2361 		c_irq_params->irq_src = int_params.irq_source;
2362 
2363 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364 				dm_vupdate_high_irq, c_irq_params);
2365 	}
2366 
2367 	/* Use GRPH_PFLIP interrupt */
2368 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2370 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2371 		if (r) {
2372 			DRM_ERROR("Failed to add page flip irq id!\n");
2373 			return r;
2374 		}
2375 
2376 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377 		int_params.irq_source =
2378 			dc_interrupt_to_irq_source(dc, i, 0);
2379 
2380 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2381 
2382 		c_irq_params->adev = adev;
2383 		c_irq_params->irq_src = int_params.irq_source;
2384 
		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2389 
2390 	/* HPD */
2391 	r = amdgpu_irq_add_id(adev, client_id,
2392 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2393 	if (r) {
2394 		DRM_ERROR("Failed to add hpd irq id!\n");
2395 		return r;
2396 	}
2397 
2398 	register_hpd_handlers(adev);
2399 
2400 	return 0;
2401 }
2402 
2403 #if defined(CONFIG_DRM_AMD_DC_DCN)
2404 /* Register IRQ sources and initialize IRQ callbacks */
2405 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2406 {
2407 	struct dc *dc = adev->dm.dc;
2408 	struct common_irq_params *c_irq_params;
2409 	struct dc_interrupt_params int_params = {0};
2410 	int r;
2411 	int i;
2412 
2413 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2415 
2416 	/*
2417 	 * Actions of amdgpu_irq_add_id():
2418 	 * 1. Register a set() function with base driver.
2419 	 *    Base driver will call set() function to enable/disable an
2420 	 *    interrupt in DC hardware.
2421 	 * 2. Register amdgpu_dm_irq_handler().
2422 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423 	 *    coming from DC hardware.
2424 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425 	 *    for acknowledging and handling.
2426 	 */
2427 
2428 	/* Use VSTARTUP interrupt */
2429 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2431 			i++) {
2432 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2433 
2434 		if (r) {
2435 			DRM_ERROR("Failed to add crtc irq id!\n");
2436 			return r;
2437 		}
2438 
2439 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 		int_params.irq_source =
2441 			dc_interrupt_to_irq_source(dc, i, 0);
2442 
2443 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2444 
2445 		c_irq_params->adev = adev;
2446 		c_irq_params->irq_src = int_params.irq_source;
2447 
2448 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449 				dm_dcn_crtc_high_irq, c_irq_params);
2450 	}
2451 
2452 	/* Use GRPH_PFLIP interrupt */
2453 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2455 			i++) {
2456 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2457 		if (r) {
2458 			DRM_ERROR("Failed to add page flip irq id!\n");
2459 			return r;
2460 		}
2461 
2462 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463 		int_params.irq_source =
2464 			dc_interrupt_to_irq_source(dc, i, 0);
2465 
2466 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2467 
2468 		c_irq_params->adev = adev;
2469 		c_irq_params->irq_src = int_params.irq_source;
2470 
		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2475 
2476 	/* HPD */
2477 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2478 			&adev->hpd_irq);
2479 	if (r) {
2480 		DRM_ERROR("Failed to add hpd irq id!\n");
2481 		return r;
2482 	}
2483 
2484 	register_hpd_handlers(adev);
2485 
2486 	return 0;
2487 }
2488 #endif
2489 
2490 /*
2491  * Acquires the lock for the atomic state object and returns
2492  * the new atomic state.
2493  *
2494  * This should only be called during atomic check.
2495  */
2496 static int dm_atomic_get_state(struct drm_atomic_state *state,
2497 			       struct dm_atomic_state **dm_state)
2498 {
2499 	struct drm_device *dev = state->dev;
2500 	struct amdgpu_device *adev = dev->dev_private;
2501 	struct amdgpu_display_manager *dm = &adev->dm;
2502 	struct drm_private_state *priv_state;
2503 
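	/*
	 * Acquire the private object state only once per atomic transaction;
	 * later calls reuse the cached pointer.
	 */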
2504 	if (*dm_state)
2505 		return 0;
2506 
2507 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508 	if (IS_ERR(priv_state))
2509 		return PTR_ERR(priv_state);
2510 
2511 	*dm_state = to_dm_atomic_state(priv_state);
2512 
2513 	return 0;
2514 }
2515 
2516 struct dm_atomic_state *
2517 dm_atomic_get_new_state(struct drm_atomic_state *state)
2518 {
2519 	struct drm_device *dev = state->dev;
2520 	struct amdgpu_device *adev = dev->dev_private;
2521 	struct amdgpu_display_manager *dm = &adev->dm;
2522 	struct drm_private_obj *obj;
2523 	struct drm_private_state *new_obj_state;
2524 	int i;
2525 
2526 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527 		if (obj->funcs == dm->atomic_obj.funcs)
2528 			return to_dm_atomic_state(new_obj_state);
2529 	}
2530 
2531 	return NULL;
2532 }
2533 
2534 struct dm_atomic_state *
2535 dm_atomic_get_old_state(struct drm_atomic_state *state)
2536 {
2537 	struct drm_device *dev = state->dev;
2538 	struct amdgpu_device *adev = dev->dev_private;
2539 	struct amdgpu_display_manager *dm = &adev->dm;
2540 	struct drm_private_obj *obj;
2541 	struct drm_private_state *old_obj_state;
2542 	int i;
2543 
2544 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545 		if (obj->funcs == dm->atomic_obj.funcs)
2546 			return to_dm_atomic_state(old_obj_state);
2547 	}
2548 
2549 	return NULL;
2550 }
2551 
2552 static struct drm_private_state *
2553 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2554 {
2555 	struct dm_atomic_state *old_state, *new_state;
2556 
2557 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2558 	if (!new_state)
2559 		return NULL;
2560 
2561 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2562 
2563 	old_state = to_dm_atomic_state(obj->state);
2564 
2565 	if (old_state && old_state->context)
2566 		new_state->context = dc_copy_state(old_state->context);
2567 
2568 	if (!new_state->context) {
2569 		kfree(new_state);
2570 		return NULL;
2571 	}
2572 
2573 	return &new_state->base;
2574 }
2575 
2576 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577 				    struct drm_private_state *state)
2578 {
2579 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2580 
2581 	if (dm_state && dm_state->context)
2582 		dc_release_state(dm_state->context);
2583 
2584 	kfree(dm_state);
2585 }
2586 
2587 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2589 	.atomic_destroy_state = dm_atomic_destroy_state,
2590 };
2591 
2592 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2593 {
2594 	struct dm_atomic_state *state;
2595 	int r;
2596 
2597 	adev->mode_info.mode_config_initialized = true;
2598 
2599 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2600 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2601 
2602 	adev->ddev->mode_config.max_width = 16384;
2603 	adev->ddev->mode_config.max_height = 16384;
2604 
2605 	adev->ddev->mode_config.preferred_depth = 24;
2606 	adev->ddev->mode_config.prefer_shadow = 1;
2607 	/* indicates support for immediate flip */
2608 	adev->ddev->mode_config.async_page_flip = true;
2609 
2610 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2611 
2612 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2613 	if (!state)
2614 		return -ENOMEM;
2615 
2616 	state->context = dc_create_state(adev->dm.dc);
2617 	if (!state->context) {
2618 		kfree(state);
2619 		return -ENOMEM;
2620 	}
2621 
2622 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2623 
2624 	drm_atomic_private_obj_init(adev->ddev,
2625 				    &adev->dm.atomic_obj,
2626 				    &state->base,
2627 				    &dm_atomic_state_funcs);
2628 
2629 	r = amdgpu_display_modeset_create_props(adev);
2630 	if (r)
2631 		return r;
2632 
2633 	r = amdgpu_dm_audio_init(adev);
2634 	if (r)
2635 		return r;
2636 
2637 	return 0;
2638 }
2639 
2640 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2642 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2643 
2644 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2646 
2647 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2648 {
2649 #if defined(CONFIG_ACPI)
2650 	struct amdgpu_dm_backlight_caps caps;
2651 
2652 	if (dm->backlight_caps.caps_valid)
2653 		return;
2654 
2655 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656 	if (caps.caps_valid) {
2657 		dm->backlight_caps.caps_valid = true;
2658 		if (caps.aux_support)
2659 			return;
2660 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2662 	} else {
2663 		dm->backlight_caps.min_input_signal =
2664 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665 		dm->backlight_caps.max_input_signal =
2666 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2667 	}
2668 #else
2669 	if (dm->backlight_caps.aux_support)
2670 		return;
2671 
2672 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2674 #endif
2675 }
2676 
2677 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2678 {
2679 	bool rc;
2680 
2681 	if (!link)
2682 		return 1;
2683 
2684 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2686 
2687 	return rc ? 0 : 1;
2688 }
2689 
2690 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691 			      const uint32_t user_brightness)
2692 {
2693 	u32 min, max, conversion_pace;
2694 	u32 brightness = user_brightness;
2695 
2696 	if (!caps)
2697 		goto out;
2698 
2699 	if (!caps->aux_support) {
2700 		max = caps->max_input_signal;
2701 		min = caps->min_input_signal;
2702 		/*
2703 		 * The brightness input is in the range 0-255
2704 		 * It needs to be rescaled to be between the
2705 		 * requested min and max input signal
2706 		 * It also needs to be scaled up by 0x101 to
2707 		 * match the DC interface which has a range of
2708 		 * 0 to 0xffff
2709 		 */
2710 		conversion_pace = 0x101;
2711 		brightness =
2712 			user_brightness
2713 			* conversion_pace
2714 			* (max - min)
2715 			/ AMDGPU_MAX_BL_LEVEL
2716 			+ min * conversion_pace;
2717 	} else {
		/*
		 * TODO: We are doing a linear interpolation here, which is OK
		 * but does not provide the optimal result. We probably want
		 * something close to the Perceptual Quantizer (PQ) curve.
		 */
2723 		max = caps->aux_max_input_signal;
2724 		min = caps->aux_min_input_signal;
2725 
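		/*
		 * The result is in millinits: user_brightness = 0 yields
		 * min * 1000, user_brightness = AMDGPU_MAX_BL_LEVEL yields
		 * max * 1000, with linear interpolation in between.
		 */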
2726 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727 			       + user_brightness * max;
		/* Multiply the value by 1000 since we use millinits */
2729 		brightness *= 1000;
2730 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2731 	}
2732 
2733 out:
2734 	return brightness;
2735 }
2736 
2737 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2738 {
2739 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 	struct amdgpu_dm_backlight_caps caps;
2741 	struct dc_link *link = NULL;
2742 	u32 brightness;
2743 	bool rc;
2744 
2745 	amdgpu_dm_update_backlight_caps(dm);
2746 	caps = dm->backlight_caps;
2747 
2748 	link = (struct dc_link *)dm->backlight_link;
2749 
2750 	brightness = convert_brightness(&caps, bd->props.brightness);
	/* Change brightness based on AUX property */
2752 	if (caps.aux_support)
2753 		return set_backlight_via_aux(link, brightness);
2754 
2755 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2756 
2757 	return rc ? 0 : 1;
2758 }
2759 
2760 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2761 {
2762 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2763 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2764 
2765 	if (ret == DC_ERROR_UNEXPECTED)
2766 		return bd->props.brightness;
2767 	return ret;
2768 }
2769 
2770 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2771 	.options = BL_CORE_SUSPENDRESUME,
2772 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2773 	.update_status	= amdgpu_dm_backlight_update_status,
2774 };
2775 
2776 static void
2777 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2778 {
2779 	char bl_name[16];
2780 	struct backlight_properties props = { 0 };
2781 
2782 	amdgpu_dm_update_backlight_caps(dm);
2783 
2784 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2785 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2786 	props.type = BACKLIGHT_RAW;
2787 
2788 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789 			dm->adev->ddev->primary->index);
2790 
2791 	dm->backlight_dev = backlight_device_register(bl_name,
2792 			dm->adev->ddev->dev,
2793 			dm,
2794 			&amdgpu_dm_backlight_ops,
2795 			&props);
2796 
2797 	if (IS_ERR(dm->backlight_dev))
2798 		DRM_ERROR("DM: Backlight registration failed!\n");
2799 	else
2800 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2801 }
2802 
2803 #endif
2804 
2805 static int initialize_plane(struct amdgpu_display_manager *dm,
2806 			    struct amdgpu_mode_info *mode_info, int plane_id,
2807 			    enum drm_plane_type plane_type,
2808 			    const struct dc_plane_cap *plane_cap)
2809 {
2810 	struct drm_plane *plane;
2811 	unsigned long possible_crtcs;
2812 	int ret = 0;
2813 
2814 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2815 	if (!plane) {
2816 		DRM_ERROR("KMS: Failed to allocate plane\n");
2817 		return -ENOMEM;
2818 	}
2819 	plane->type = plane_type;
2820 
2821 	/*
2822 	 * HACK: IGT tests expect that the primary plane for a CRTC
2823 	 * can only have one possible CRTC. Only expose support for
2824 	 * any CRTC if they're not going to be used as a primary plane
2825 	 * for a CRTC - like overlay or underlay planes.
2826 	 */
2827 	possible_crtcs = 1 << plane_id;
2828 	if (plane_id >= dm->dc->caps.max_streams)
2829 		possible_crtcs = 0xff;
2830 
2831 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2832 
2833 	if (ret) {
2834 		DRM_ERROR("KMS: Failed to initialize plane\n");
2835 		kfree(plane);
2836 		return ret;
2837 	}
2838 
2839 	if (mode_info)
2840 		mode_info->planes[plane_id] = plane;
2841 
2842 	return ret;
2843 }
2844 
2845 
2846 static void register_backlight_device(struct amdgpu_display_manager *dm,
2847 				      struct dc_link *link)
2848 {
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851 
2852 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853 	    link->type != dc_connection_none) {
2854 		/*
2855 		 * Event if registration failed, we should continue with
2856 		 * DM initialization because not having a backlight control
2857 		 * is better then a black screen.
2858 		 */
2859 		amdgpu_dm_register_backlight_device(dm);
2860 
2861 		if (dm->backlight_dev)
2862 			dm->backlight_link = link;
2863 	}
2864 #endif
2865 }
2866 
2867 
2868 /*
2869  * In this architecture, the association
2870  * connector -> encoder -> crtc
2871  * id not really requried. The crtc and connector will hold the
2872  * display_index as an abstraction to use with DAL component
2873  *
2874  * Returns 0 on success
2875  */
2876 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2877 {
2878 	struct amdgpu_display_manager *dm = &adev->dm;
2879 	int32_t i;
2880 	struct amdgpu_dm_connector *aconnector = NULL;
2881 	struct amdgpu_encoder *aencoder = NULL;
2882 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2883 	uint32_t link_cnt;
2884 	int32_t primary_planes;
2885 	enum dc_connection_type new_connection_type = dc_connection_none;
2886 	const struct dc_plane_cap *plane;
2887 
2888 	link_cnt = dm->dc->caps.max_links;
2889 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2890 		DRM_ERROR("DM: Failed to initialize mode config\n");
2891 		return -EINVAL;
2892 	}
2893 
2894 	/* There is one primary plane per CRTC */
2895 	primary_planes = dm->dc->caps.max_streams;
2896 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2897 
2898 	/*
2899 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2900 	 * Order is reversed to match iteration order in atomic check.
2901 	 */
2902 	for (i = (primary_planes - 1); i >= 0; i--) {
2903 		plane = &dm->dc->caps.planes[i];
2904 
2905 		if (initialize_plane(dm, mode_info, i,
2906 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2907 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908 			goto fail;
2909 		}
2910 	}
2911 
2912 	/*
2913 	 * Initialize overlay planes, index starting after primary planes.
2914 	 * These planes have a higher DRM index than the primary planes since
2915 	 * they should be considered as having a higher z-order.
2916 	 * Order is reversed to match iteration order in atomic check.
2917 	 *
2918 	 * Only support DCN for now, and only expose one so we don't encourage
2919 	 * userspace to use up all the pipes.
2920 	 */
2921 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2923 
2924 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2925 			continue;
2926 
2927 		if (!plane->blends_with_above || !plane->blends_with_below)
2928 			continue;
2929 
2930 		if (!plane->pixel_format_support.argb8888)
2931 			continue;
2932 
2933 		if (initialize_plane(dm, NULL, primary_planes + i,
2934 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2935 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2936 			goto fail;
2937 		}
2938 
2939 		/* Only create one overlay plane. */
2940 		break;
2941 	}
2942 
2943 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2944 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2945 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2946 			goto fail;
2947 		}
2948 
2949 	dm->display_indexes_num = dm->dc->caps.max_streams;
2950 
2951 	/* loops over all connectors on the board */
2952 	for (i = 0; i < link_cnt; i++) {
2953 		struct dc_link *link = NULL;
2954 
2955 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2956 			DRM_ERROR(
2957 				"KMS: Cannot support more than %d display indexes\n",
2958 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2959 			continue;
2960 		}
2961 
2962 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2963 		if (!aconnector)
2964 			goto fail;
2965 
2966 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2967 		if (!aencoder)
2968 			goto fail;
2969 
2970 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2972 			goto fail;
2973 		}
2974 
2975 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976 			DRM_ERROR("KMS: Failed to initialize connector\n");
2977 			goto fail;
2978 		}
2979 
2980 		link = dc_get_link_at_index(dm->dc, i);
2981 
2982 		if (!dc_link_detect_sink(link, &new_connection_type))
2983 			DRM_ERROR("KMS: Failed to detect connector\n");
2984 
2985 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986 			emulated_link_detect(link);
2987 			amdgpu_dm_update_connector_after_detect(aconnector);
2988 
2989 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2990 			amdgpu_dm_update_connector_after_detect(aconnector);
2991 			register_backlight_device(dm, link);
2992 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993 				amdgpu_dm_set_psr_caps(link);
2994 		}
2995 
2996 
2997 	}
2998 
2999 	/* Software is initialized. Now we can register interrupt handlers. */
3000 	switch (adev->asic_type) {
3001 	case CHIP_BONAIRE:
3002 	case CHIP_HAWAII:
3003 	case CHIP_KAVERI:
3004 	case CHIP_KABINI:
3005 	case CHIP_MULLINS:
3006 	case CHIP_TONGA:
3007 	case CHIP_FIJI:
3008 	case CHIP_CARRIZO:
3009 	case CHIP_STONEY:
3010 	case CHIP_POLARIS11:
3011 	case CHIP_POLARIS10:
3012 	case CHIP_POLARIS12:
3013 	case CHIP_VEGAM:
3014 	case CHIP_VEGA10:
3015 	case CHIP_VEGA12:
3016 	case CHIP_VEGA20:
3017 		if (dce110_register_irq_handlers(dm->adev)) {
3018 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3019 			goto fail;
3020 		}
3021 		break;
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023 	case CHIP_RAVEN:
3024 	case CHIP_NAVI12:
3025 	case CHIP_NAVI10:
3026 	case CHIP_NAVI14:
3027 	case CHIP_RENOIR:
3028 		if (dcn10_register_irq_handlers(dm->adev)) {
3029 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3030 			goto fail;
3031 		}
3032 		break;
3033 #endif
3034 	default:
3035 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3036 		goto fail;
3037 	}
3038 
3039 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3041 
3042 	/* No userspace support. */
3043 	dm->dc->debug.disable_tri_buf = true;
3044 
3045 	return 0;
3046 fail:
3047 	kfree(aencoder);
3048 	kfree(aconnector);
3049 
3050 	return -EINVAL;
3051 }
3052 
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3054 {
3055 	drm_mode_config_cleanup(dm->ddev);
3056 	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
3059 
3060 /******************************************************************************
3061  * amdgpu_display_funcs functions
3062  *****************************************************************************/
3063 
3064 /*
3065  * dm_bandwidth_update - program display watermarks
3066  *
3067  * @adev: amdgpu_device pointer
3068  *
3069  * Calculate and program the display watermarks and line buffer allocation.
3070  */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3072 {
3073 	/* TODO: implement later */
3074 }
3075 
3076 static const struct amdgpu_display_funcs dm_display_funcs = {
3077 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3079 	.backlight_set_level = NULL, /* never called for DC */
3080 	.backlight_get_level = NULL, /* never called for DC */
3081 	.hpd_sense = NULL,/* called unconditionally */
3082 	.hpd_set_polarity = NULL, /* called unconditionally */
3083 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3084 	.page_flip_get_scanoutpos =
3085 		dm_crtc_get_scanoutpos,/* called unconditionally */
3086 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3088 };
3089 
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
3091 
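/*
 * Debug helper: writing a non-zero value to the s3_debug sysfs attribute
 * runs the DM resume path, writing zero runs the suspend path.
 */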
3092 static ssize_t s3_debug_store(struct device *device,
3093 			      struct device_attribute *attr,
3094 			      const char *buf,
3095 			      size_t count)
3096 {
3097 	int ret;
3098 	int s3_state;
3099 	struct drm_device *drm_dev = dev_get_drvdata(device);
3100 	struct amdgpu_device *adev = drm_dev->dev_private;
3101 
3102 	ret = kstrtoint(buf, 0, &s3_state);
3103 
	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else {
			dm_suspend(adev);
		}
	}

	return ret == 0 ? count : ret;
3113 }
3114 
3115 DEVICE_ATTR_WO(s3_debug);
3116 
3117 #endif
3118 
3119 static int dm_early_init(void *handle)
3120 {
3121 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3122 
3123 	switch (adev->asic_type) {
3124 	case CHIP_BONAIRE:
3125 	case CHIP_HAWAII:
3126 		adev->mode_info.num_crtc = 6;
3127 		adev->mode_info.num_hpd = 6;
3128 		adev->mode_info.num_dig = 6;
3129 		break;
3130 	case CHIP_KAVERI:
3131 		adev->mode_info.num_crtc = 4;
3132 		adev->mode_info.num_hpd = 6;
3133 		adev->mode_info.num_dig = 7;
3134 		break;
3135 	case CHIP_KABINI:
3136 	case CHIP_MULLINS:
3137 		adev->mode_info.num_crtc = 2;
3138 		adev->mode_info.num_hpd = 6;
3139 		adev->mode_info.num_dig = 6;
3140 		break;
3141 	case CHIP_FIJI:
3142 	case CHIP_TONGA:
3143 		adev->mode_info.num_crtc = 6;
3144 		adev->mode_info.num_hpd = 6;
3145 		adev->mode_info.num_dig = 7;
3146 		break;
3147 	case CHIP_CARRIZO:
3148 		adev->mode_info.num_crtc = 3;
3149 		adev->mode_info.num_hpd = 6;
3150 		adev->mode_info.num_dig = 9;
3151 		break;
3152 	case CHIP_STONEY:
3153 		adev->mode_info.num_crtc = 2;
3154 		adev->mode_info.num_hpd = 6;
3155 		adev->mode_info.num_dig = 9;
3156 		break;
3157 	case CHIP_POLARIS11:
3158 	case CHIP_POLARIS12:
3159 		adev->mode_info.num_crtc = 5;
3160 		adev->mode_info.num_hpd = 5;
3161 		adev->mode_info.num_dig = 5;
3162 		break;
3163 	case CHIP_POLARIS10:
3164 	case CHIP_VEGAM:
3165 		adev->mode_info.num_crtc = 6;
3166 		adev->mode_info.num_hpd = 6;
3167 		adev->mode_info.num_dig = 6;
3168 		break;
3169 	case CHIP_VEGA10:
3170 	case CHIP_VEGA12:
3171 	case CHIP_VEGA20:
3172 		adev->mode_info.num_crtc = 6;
3173 		adev->mode_info.num_hpd = 6;
3174 		adev->mode_info.num_dig = 6;
3175 		break;
3176 #if defined(CONFIG_DRM_AMD_DC_DCN)
3177 	case CHIP_RAVEN:
3178 		adev->mode_info.num_crtc = 4;
3179 		adev->mode_info.num_hpd = 4;
3180 		adev->mode_info.num_dig = 4;
3181 		break;
3182 #endif
3183 	case CHIP_NAVI10:
3184 	case CHIP_NAVI12:
3185 		adev->mode_info.num_crtc = 6;
3186 		adev->mode_info.num_hpd = 6;
3187 		adev->mode_info.num_dig = 6;
3188 		break;
3189 	case CHIP_NAVI14:
3190 		adev->mode_info.num_crtc = 5;
3191 		adev->mode_info.num_hpd = 5;
3192 		adev->mode_info.num_dig = 5;
3193 		break;
3194 	case CHIP_RENOIR:
3195 		adev->mode_info.num_crtc = 4;
3196 		adev->mode_info.num_hpd = 4;
3197 		adev->mode_info.num_dig = 4;
3198 		break;
3199 	default:
3200 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3201 		return -EINVAL;
3202 	}
3203 
3204 	amdgpu_dm_set_irq_funcs(adev);
3205 
3206 	if (adev->mode_info.funcs == NULL)
3207 		adev->mode_info.funcs = &dm_display_funcs;
3208 
3209 	/*
3210 	 * Note: Do NOT change adev->audio_endpt_rreg and
3211 	 * adev->audio_endpt_wreg because they are initialised in
3212 	 * amdgpu_device_init()
3213 	 */
3214 #if defined(CONFIG_DEBUG_KERNEL_DC)
3215 	device_create_file(
3216 		adev->ddev->dev,
3217 		&dev_attr_s3_debug);
3218 #endif
3219 
3220 	return 0;
3221 }
3222 
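/*
 * A full modeset is required only when DRM flagged one on the CRTC and the
 * CRTC will be both enabled and active afterwards.
 */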
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224 			     struct dc_stream_state *new_stream,
3225 			     struct dc_stream_state *old_stream)
3226 {
3227 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228 		return false;
3229 
3230 	if (!crtc_state->enable)
3231 		return false;
3232 
3233 	return crtc_state->active;
3234 }
3235 
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3237 {
3238 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239 		return false;
3240 
3241 	return !crtc_state->enable || !crtc_state->active;
3242 }
3243 
3244 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3245 {
3246 	drm_encoder_cleanup(encoder);
3247 	kfree(encoder);
3248 }
3249 
3250 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251 	.destroy = amdgpu_dm_encoder_destroy,
3252 };
3253 
3254 
3255 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256 				struct dc_scaling_info *scaling_info)
3257 {
3258 	int scale_w, scale_h;
3259 
3260 	memset(scaling_info, 0, sizeof(*scaling_info));
3261 
	/* Source is fixed-point 16.16; we ignore the fractional part for now... */
3263 	scaling_info->src_rect.x = state->src_x >> 16;
3264 	scaling_info->src_rect.y = state->src_y >> 16;
3265 
3266 	scaling_info->src_rect.width = state->src_w >> 16;
3267 	if (scaling_info->src_rect.width == 0)
3268 		return -EINVAL;
3269 
3270 	scaling_info->src_rect.height = state->src_h >> 16;
3271 	if (scaling_info->src_rect.height == 0)
3272 		return -EINVAL;
3273 
3274 	scaling_info->dst_rect.x = state->crtc_x;
3275 	scaling_info->dst_rect.y = state->crtc_y;
3276 
3277 	if (state->crtc_w == 0)
3278 		return -EINVAL;
3279 
3280 	scaling_info->dst_rect.width = state->crtc_w;
3281 
3282 	if (state->crtc_h == 0)
3283 		return -EINVAL;
3284 
3285 	scaling_info->dst_rect.height = state->crtc_h;
3286 
3287 	/* DRM doesn't specify clipping on destination output. */
3288 	scaling_info->clip_rect = scaling_info->dst_rect;
3289 
3290 	/* TODO: Validate scaling per-format with DC plane caps */
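	/*
	 * The scale factors below are in thousandths: 250 corresponds to
	 * 0.25x (the maximum downscale) and 16000 to 16x (the maximum
	 * upscale).
	 */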
3291 	scale_w = scaling_info->dst_rect.width * 1000 /
3292 		  scaling_info->src_rect.width;
3293 
3294 	if (scale_w < 250 || scale_w > 16000)
3295 		return -EINVAL;
3296 
3297 	scale_h = scaling_info->dst_rect.height * 1000 /
3298 		  scaling_info->src_rect.height;
3299 
3300 	if (scale_h < 250 || scale_h > 16000)
3301 		return -EINVAL;
3302 
3303 	/*
3304 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3305 	 * assume reasonable defaults based on the format.
3306 	 */
3307 
3308 	return 0;
3309 }
3310 
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312 		       uint64_t *tiling_flags)
3313 {
3314 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315 	int r = amdgpu_bo_reserve(rbo, false);
3316 
3317 	if (unlikely(r)) {
3318 		/* Don't show error message when returning -ERESTARTSYS */
3319 		if (r != -ERESTARTSYS)
3320 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3321 		return r;
3322 	}
3323 
3324 	if (tiling_flags)
3325 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326 
3327 	amdgpu_bo_unreserve(rbo);
3328 
3329 	return r;
3330 }
3331 
3332 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3333 {
3334 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3335 
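	/* The DCC offset is stored in units of 256 bytes from the base address. */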
3336 	return offset ? (address + offset * 256) : 0;
3337 }
3338 
3339 static int
3340 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3341 			  const struct amdgpu_framebuffer *afb,
3342 			  const enum surface_pixel_format format,
3343 			  const enum dc_rotation_angle rotation,
3344 			  const struct plane_size *plane_size,
3345 			  const union dc_tiling_info *tiling_info,
3346 			  const uint64_t info,
3347 			  struct dc_plane_dcc_param *dcc,
3348 			  struct dc_plane_address *address,
3349 			  bool force_disable_dcc)
3350 {
3351 	struct dc *dc = adev->dm.dc;
3352 	struct dc_dcc_surface_param input;
3353 	struct dc_surface_dcc_cap output;
3354 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3355 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3356 	uint64_t dcc_address;
3357 
3358 	memset(&input, 0, sizeof(input));
3359 	memset(&output, 0, sizeof(output));
3360 
3361 	if (force_disable_dcc)
3362 		return 0;
3363 
3364 	if (!offset)
3365 		return 0;
3366 
3367 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3368 		return 0;
3369 
3370 	if (!dc->cap_funcs.get_dcc_compression_cap)
3371 		return -EINVAL;
3372 
3373 	input.format = format;
3374 	input.surface_size.width = plane_size->surface_size.width;
3375 	input.surface_size.height = plane_size->surface_size.height;
3376 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3377 
3378 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3379 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3380 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3381 		input.scan = SCAN_DIRECTION_VERTICAL;
3382 
3383 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3384 		return -EINVAL;
3385 
3386 	if (!output.capable)
3387 		return -EINVAL;
3388 
3389 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3390 		return -EINVAL;
3391 
3392 	dcc->enable = 1;
3393 	dcc->meta_pitch =
3394 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3395 	dcc->independent_64b_blks = i64b;
3396 
3397 	dcc_address = get_dcc_address(afb->address, info);
3398 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3399 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3400 
3401 	return 0;
3402 }
3403 
3404 static int
3405 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3406 			     const struct amdgpu_framebuffer *afb,
3407 			     const enum surface_pixel_format format,
3408 			     const enum dc_rotation_angle rotation,
3409 			     const uint64_t tiling_flags,
3410 			     union dc_tiling_info *tiling_info,
3411 			     struct plane_size *plane_size,
3412 			     struct dc_plane_dcc_param *dcc,
3413 			     struct dc_plane_address *address,
3414 			     bool force_disable_dcc)
3415 {
3416 	const struct drm_framebuffer *fb = &afb->base;
3417 	int ret;
3418 
3419 	memset(tiling_info, 0, sizeof(*tiling_info));
3420 	memset(plane_size, 0, sizeof(*plane_size));
3421 	memset(dcc, 0, sizeof(*dcc));
3422 	memset(address, 0, sizeof(*address));
3423 
3424 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3425 		plane_size->surface_size.x = 0;
3426 		plane_size->surface_size.y = 0;
3427 		plane_size->surface_size.width = fb->width;
3428 		plane_size->surface_size.height = fb->height;
3429 		plane_size->surface_pitch =
3430 			fb->pitches[0] / fb->format->cpp[0];
3431 
3432 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3433 		address->grph.addr.low_part = lower_32_bits(afb->address);
3434 		address->grph.addr.high_part = upper_32_bits(afb->address);
3435 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
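		/*
		 * Semi-planar video surface (e.g. NV12): the luma plane is
		 * plane 0 and the chroma plane starts at fb->offsets[1].
		 */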
3436 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3437 
3438 		plane_size->surface_size.x = 0;
3439 		plane_size->surface_size.y = 0;
3440 		plane_size->surface_size.width = fb->width;
3441 		plane_size->surface_size.height = fb->height;
3442 		plane_size->surface_pitch =
3443 			fb->pitches[0] / fb->format->cpp[0];
3444 
3445 		plane_size->chroma_size.x = 0;
3446 		plane_size->chroma_size.y = 0;
3447 		/* TODO: set these based on surface format */
3448 		plane_size->chroma_size.width = fb->width / 2;
3449 		plane_size->chroma_size.height = fb->height / 2;
3450 
3451 		plane_size->chroma_pitch =
3452 			fb->pitches[1] / fb->format->cpp[1];
3453 
3454 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3455 		address->video_progressive.luma_addr.low_part =
3456 			lower_32_bits(afb->address);
3457 		address->video_progressive.luma_addr.high_part =
3458 			upper_32_bits(afb->address);
3459 		address->video_progressive.chroma_addr.low_part =
3460 			lower_32_bits(chroma_addr);
3461 		address->video_progressive.chroma_addr.high_part =
3462 			upper_32_bits(chroma_addr);
3463 	}
3464 
3465 	/* Fill GFX8 params */
3466 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3467 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3468 
3469 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3470 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3471 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3472 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3473 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3474 
3475 		/* XXX fix me for VI */
3476 		tiling_info->gfx8.num_banks = num_banks;
3477 		tiling_info->gfx8.array_mode =
3478 				DC_ARRAY_2D_TILED_THIN1;
3479 		tiling_info->gfx8.tile_split = tile_split;
3480 		tiling_info->gfx8.bank_width = bankw;
3481 		tiling_info->gfx8.bank_height = bankh;
3482 		tiling_info->gfx8.tile_aspect = mtaspect;
3483 		tiling_info->gfx8.tile_mode =
3484 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3485 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3486 			== DC_ARRAY_1D_TILED_THIN1) {
3487 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3488 	}
3489 
3490 	tiling_info->gfx8.pipe_config =
3491 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3492 
3493 	if (adev->asic_type == CHIP_VEGA10 ||
3494 	    adev->asic_type == CHIP_VEGA12 ||
3495 	    adev->asic_type == CHIP_VEGA20 ||
3496 	    adev->asic_type == CHIP_NAVI10 ||
3497 	    adev->asic_type == CHIP_NAVI14 ||
3498 	    adev->asic_type == CHIP_NAVI12 ||
3499 	    adev->asic_type == CHIP_RENOIR ||
3500 	    adev->asic_type == CHIP_RAVEN) {
3501 		/* Fill GFX9 params */
3502 		tiling_info->gfx9.num_pipes =
3503 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3504 		tiling_info->gfx9.num_banks =
3505 			adev->gfx.config.gb_addr_config_fields.num_banks;
3506 		tiling_info->gfx9.pipe_interleave =
3507 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3508 		tiling_info->gfx9.num_shader_engines =
3509 			adev->gfx.config.gb_addr_config_fields.num_se;
3510 		tiling_info->gfx9.max_compressed_frags =
3511 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3512 		tiling_info->gfx9.num_rb_per_se =
3513 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3514 		tiling_info->gfx9.swizzle =
3515 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3516 		tiling_info->gfx9.shaderEnable = 1;
3517 
3518 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3519 						plane_size, tiling_info,
3520 						tiling_flags, dcc, address,
3521 						force_disable_dcc);
3522 		if (ret)
3523 			return ret;
3524 	}
3525 
3526 	return 0;
3527 }
3528 
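/*
 * Derive DC blending attributes from the DRM plane state: per-pixel alpha
 * is only honoured for overlay planes scanning out a premultiplied-alpha
 * format, and the 16-bit DRM plane alpha is scaled down to the 8-bit
 * global alpha value that DC expects.
 */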
3529 static void
3530 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3531 			       bool *per_pixel_alpha, bool *global_alpha,
3532 			       int *global_alpha_value)
3533 {
3534 	*per_pixel_alpha = false;
3535 	*global_alpha = false;
3536 	*global_alpha_value = 0xff;
3537 
3538 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3539 		return;
3540 
3541 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3542 		static const uint32_t alpha_formats[] = {
3543 			DRM_FORMAT_ARGB8888,
3544 			DRM_FORMAT_RGBA8888,
3545 			DRM_FORMAT_ABGR8888,
3546 		};
3547 		uint32_t format = plane_state->fb->format->format;
3548 		unsigned int i;
3549 
3550 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3551 			if (format == alpha_formats[i]) {
3552 				*per_pixel_alpha = true;
3553 				break;
3554 			}
3555 		}
3556 	}
3557 
3558 	if (plane_state->alpha < 0xffff) {
3559 		*global_alpha = true;
3560 		*global_alpha_value = plane_state->alpha >> 8;
3561 	}
3562 }
3563 
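/*
 * Map the DRM COLOR_ENCODING/COLOR_RANGE plane properties to a DC color
 * space. RGB surfaces always use sRGB; YCbCr surfaces pick the BT.601 or
 * BT.709 variant matching the requested range. BT.2020 is only handled in
 * full range here, so a limited-range request fails with -EINVAL.
 */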
3564 static int
3565 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3566 			    const enum surface_pixel_format format,
3567 			    enum dc_color_space *color_space)
3568 {
3569 	bool full_range;
3570 
3571 	*color_space = COLOR_SPACE_SRGB;
3572 
3573 	/* DRM color properties only affect non-RGB formats. */
3574 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3575 		return 0;
3576 
3577 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3578 
3579 	switch (plane_state->color_encoding) {
3580 	case DRM_COLOR_YCBCR_BT601:
3581 		if (full_range)
3582 			*color_space = COLOR_SPACE_YCBCR601;
3583 		else
3584 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3585 		break;
3586 
3587 	case DRM_COLOR_YCBCR_BT709:
3588 		if (full_range)
3589 			*color_space = COLOR_SPACE_YCBCR709;
3590 		else
3591 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3592 		break;
3593 
3594 	case DRM_COLOR_YCBCR_BT2020:
3595 		if (full_range)
3596 			*color_space = COLOR_SPACE_2020_YCBCR;
3597 		else
3598 			return -EINVAL;
3599 		break;
3600 
3601 	default:
3602 		return -EINVAL;
3603 	}
3604 
3605 	return 0;
3606 }
3607 
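/*
 * Translate a DRM plane state into the dc_plane_info and dc_plane_address
 * that DC consumes: surface pixel format, rotation, color space, blending,
 * tiling/DCC layout and the scanout addresses taken from the framebuffer.
 */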
3608 static int
3609 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3610 			    const struct drm_plane_state *plane_state,
3611 			    const uint64_t tiling_flags,
3612 			    struct dc_plane_info *plane_info,
3613 			    struct dc_plane_address *address,
3614 			    bool force_disable_dcc)
3615 {
3616 	const struct drm_framebuffer *fb = plane_state->fb;
3617 	const struct amdgpu_framebuffer *afb =
3618 		to_amdgpu_framebuffer(plane_state->fb);
3619 	struct drm_format_name_buf format_name;
3620 	int ret;
3621 
3622 	memset(plane_info, 0, sizeof(*plane_info));
3623 
3624 	switch (fb->format->format) {
3625 	case DRM_FORMAT_C8:
3626 		plane_info->format =
3627 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3628 		break;
3629 	case DRM_FORMAT_RGB565:
3630 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3631 		break;
3632 	case DRM_FORMAT_XRGB8888:
3633 	case DRM_FORMAT_ARGB8888:
3634 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3635 		break;
3636 	case DRM_FORMAT_XRGB2101010:
3637 	case DRM_FORMAT_ARGB2101010:
3638 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3639 		break;
3640 	case DRM_FORMAT_XBGR2101010:
3641 	case DRM_FORMAT_ABGR2101010:
3642 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3643 		break;
3644 	case DRM_FORMAT_XBGR8888:
3645 	case DRM_FORMAT_ABGR8888:
3646 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3647 		break;
3648 	case DRM_FORMAT_NV21:
3649 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3650 		break;
3651 	case DRM_FORMAT_NV12:
3652 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3653 		break;
3654 	case DRM_FORMAT_P010:
3655 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3656 		break;
3657 	default:
3658 		DRM_ERROR(
3659 			"Unsupported screen format %s\n",
3660 			drm_get_format_name(fb->format->format, &format_name));
3661 		return -EINVAL;
3662 	}
3663 
3664 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3665 	case DRM_MODE_ROTATE_0:
3666 		plane_info->rotation = ROTATION_ANGLE_0;
3667 		break;
3668 	case DRM_MODE_ROTATE_90:
3669 		plane_info->rotation = ROTATION_ANGLE_90;
3670 		break;
3671 	case DRM_MODE_ROTATE_180:
3672 		plane_info->rotation = ROTATION_ANGLE_180;
3673 		break;
3674 	case DRM_MODE_ROTATE_270:
3675 		plane_info->rotation = ROTATION_ANGLE_270;
3676 		break;
3677 	default:
3678 		plane_info->rotation = ROTATION_ANGLE_0;
3679 		break;
3680 	}
3681 
3682 	plane_info->visible = true;
3683 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3684 
3685 	plane_info->layer_index = 0;
3686 
3687 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3688 					  &plane_info->color_space);
3689 	if (ret)
3690 		return ret;
3691 
3692 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3693 					   plane_info->rotation, tiling_flags,
3694 					   &plane_info->tiling_info,
3695 					   &plane_info->plane_size,
3696 					   &plane_info->dcc, address,
3697 					   force_disable_dcc);
3698 	if (ret)
3699 		return ret;
3700 
3701 	fill_blending_from_plane_state(
3702 		plane_state, &plane_info->per_pixel_alpha,
3703 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3704 
3705 	return 0;
3706 }
3707 
3708 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3709 				    struct dc_plane_state *dc_plane_state,
3710 				    struct drm_plane_state *plane_state,
3711 				    struct drm_crtc_state *crtc_state)
3712 {
3713 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3714 	const struct amdgpu_framebuffer *amdgpu_fb =
3715 		to_amdgpu_framebuffer(plane_state->fb);
3716 	struct dc_scaling_info scaling_info;
3717 	struct dc_plane_info plane_info;
3718 	uint64_t tiling_flags;
3719 	int ret;
3720 	bool force_disable_dcc = false;
3721 
3722 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3723 	if (ret)
3724 		return ret;
3725 
3726 	dc_plane_state->src_rect = scaling_info.src_rect;
3727 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3728 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3729 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3730 
3731 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3732 	if (ret)
3733 		return ret;
3734 
3735 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3736 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3737 					  &plane_info,
3738 					  &dc_plane_state->address,
3739 					  force_disable_dcc);
3740 	if (ret)
3741 		return ret;
3742 
3743 	dc_plane_state->format = plane_info.format;
3744 	dc_plane_state->color_space = plane_info.color_space;
3746 	dc_plane_state->plane_size = plane_info.plane_size;
3747 	dc_plane_state->rotation = plane_info.rotation;
3748 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3749 	dc_plane_state->stereo_format = plane_info.stereo_format;
3750 	dc_plane_state->tiling_info = plane_info.tiling_info;
3751 	dc_plane_state->visible = plane_info.visible;
3752 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3753 	dc_plane_state->global_alpha = plane_info.global_alpha;
3754 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3755 	dc_plane_state->dcc = plane_info.dcc;
3756 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3757 
3758 	/*
3759 	 * Always set input transfer function, since plane state is refreshed
3760 	 * every time.
3761 	 */
3762 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3763 	if (ret)
3764 		return ret;
3765 
3766 	return 0;
3767 }
3768 
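/*
 * Compute the stream source/destination rectangles from the requested
 * scaling mode: RMX_ASPECT and RMX_OFF fit the mode into the addressable
 * area while preserving its aspect ratio, RMX_CENTER scans it out 1:1
 * centered on the timing, and any underscan borders shrink the
 * destination rectangle further.
 */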
3769 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3770 					   const struct dm_connector_state *dm_state,
3771 					   struct dc_stream_state *stream)
3772 {
3773 	enum amdgpu_rmx_type rmx_type;
3774 
3775 	struct rect src = { 0 }; /* viewport in composition space */
3776 	struct rect dst = { 0 }; /* stream addressable area */
3777 
3778 	/* no mode. nothing to be done */
3779 	if (!mode)
3780 		return;
3781 
3782 	/* Full screen scaling by default */
3783 	src.width = mode->hdisplay;
3784 	src.height = mode->vdisplay;
3785 	dst.width = stream->timing.h_addressable;
3786 	dst.height = stream->timing.v_addressable;
3787 
3788 	if (dm_state) {
3789 		rmx_type = dm_state->scaling;
3790 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3791 			if (src.width * dst.height <
3792 					src.height * dst.width) {
3793 				/* height needs less upscaling/more downscaling */
3794 				dst.width = src.width *
3795 						dst.height / src.height;
3796 			} else {
3797 				/* width needs less upscaling/more downscaling */
3798 				dst.height = src.height *
3799 						dst.width / src.width;
3800 			}
3801 		} else if (rmx_type == RMX_CENTER) {
3802 			dst = src;
3803 		}
3804 
3805 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3806 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3807 
3808 		if (dm_state->underscan_enable) {
3809 			dst.x += dm_state->underscan_hborder / 2;
3810 			dst.y += dm_state->underscan_vborder / 2;
3811 			dst.width -= dm_state->underscan_hborder;
3812 			dst.height -= dm_state->underscan_vborder;
3813 		}
3814 	}
3815 
3816 	stream->src = src;
3817 	stream->dst = dst;
3818 
3819 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3820 			dst.x, dst.y, dst.width, dst.height);
3822 }
3823 
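/*
 * Pick the stream color depth: start from the sink's EDID bpc (or the
 * HDMI HF-VSDB deep-color capabilities for YCbCr 4:2:0 modes), cap it at
 * the user-requested max_requested_bpc and round down to an even value.
 */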
3824 static enum dc_color_depth
3825 convert_color_depth_from_display_info(const struct drm_connector *connector,
3826 				      const struct drm_connector_state *state,
3827 				      bool is_y420)
3828 {
3829 	uint8_t bpc;
3830 
3831 	if (is_y420) {
3832 		bpc = 8;
3833 
3834 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3835 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3836 			bpc = 16;
3837 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3838 			bpc = 12;
3839 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3840 			bpc = 10;
3841 	} else {
3842 		bpc = (uint8_t)connector->display_info.bpc;
3843 		/* Assume 8 bpc by default if no bpc is specified. */
3844 		bpc = bpc ? bpc : 8;
3845 	}
3846 
3847 	if (!state)
3848 		state = connector->state;
3849 
3850 	if (state) {
3851 		/*
3852 		 * Cap display bpc based on the user requested value.
3853 		 *
3854 		 * The value for state->max_bpc may not be correctly updated
3855 		 * depending on when the connector gets added to the state
3856 		 * or if this was called outside of atomic check, so it
3857 		 * can't be used directly.
3858 		 */
3859 		bpc = min(bpc, state->max_requested_bpc);
3860 
3861 		/* Round down to the nearest even number. */
3862 		bpc = bpc - (bpc & 1);
3863 	}
3864 
3865 	switch (bpc) {
3866 	case 0:
3867 		/*
3868 		 * Temporary workaround: DRM doesn't parse color depth for
3869 		 * EDID revisions before 1.4.
3870 		 * TODO: Fix edid parsing
3871 		 */
3872 		return COLOR_DEPTH_888;
3873 	case 6:
3874 		return COLOR_DEPTH_666;
3875 	case 8:
3876 		return COLOR_DEPTH_888;
3877 	case 10:
3878 		return COLOR_DEPTH_101010;
3879 	case 12:
3880 		return COLOR_DEPTH_121212;
3881 	case 14:
3882 		return COLOR_DEPTH_141414;
3883 	case 16:
3884 		return COLOR_DEPTH_161616;
3885 	default:
3886 		return COLOR_DEPTH_UNDEFINED;
3887 	}
3888 }
3889 
3890 static enum dc_aspect_ratio
3891 get_aspect_ratio(const struct drm_display_mode *mode_in)
3892 {
3893 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3894 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3895 }
3896 
3897 static enum dc_color_space
3898 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3899 {
3900 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3901 
3902 	switch (dc_crtc_timing->pixel_encoding)	{
3903 	case PIXEL_ENCODING_YCBCR422:
3904 	case PIXEL_ENCODING_YCBCR444:
3905 	case PIXEL_ENCODING_YCBCR420:
3906 	{
3907 		/*
3908 		 * 27030 kHz is the separation point between HDTV and SDTV
3909 		 * according to the HDMI spec, so we use YCbCr709 above it
3910 		 * and YCbCr601 below it.
3911 		 */
3912 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3913 			if (dc_crtc_timing->flags.Y_ONLY)
3914 				color_space =
3915 					COLOR_SPACE_YCBCR709_LIMITED;
3916 			else
3917 				color_space = COLOR_SPACE_YCBCR709;
3918 		} else {
3919 			if (dc_crtc_timing->flags.Y_ONLY)
3920 				color_space =
3921 					COLOR_SPACE_YCBCR601_LIMITED;
3922 			else
3923 				color_space = COLOR_SPACE_YCBCR601;
3924 		}
3925 
3926 	}
3927 	break;
3928 	case PIXEL_ENCODING_RGB:
3929 		color_space = COLOR_SPACE_SRGB;
3930 		break;
3931 
3932 	default:
3933 		WARN_ON(1);
3934 		break;
3935 	}
3936 
3937 	return color_space;
3938 }
3939 
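/*
 * Walk the color depth down until the required TMDS clock fits within the
 * sink's limit. For example, a 594000 kHz 4K@60 mode at 10 bpc needs
 * 594000 * 30 / 24 = 742500 kHz, which exceeds the 600000 kHz
 * max_tmds_clock of an HDMI 2.0 sink, so the depth falls back to 8 bpc.
 */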
3940 static bool adjust_colour_depth_from_display_info(
3941 	struct dc_crtc_timing *timing_out,
3942 	const struct drm_display_info *info)
3943 {
3944 	enum dc_color_depth depth = timing_out->display_color_depth;
3945 	int normalized_clk;
3946 	do {
3947 		normalized_clk = timing_out->pix_clk_100hz / 10;
3948 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3949 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3950 			normalized_clk /= 2;
3951 		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
3952 		switch (depth) {
3953 		case COLOR_DEPTH_888:
3954 			break;
3955 		case COLOR_DEPTH_101010:
3956 			normalized_clk = (normalized_clk * 30) / 24;
3957 			break;
3958 		case COLOR_DEPTH_121212:
3959 			normalized_clk = (normalized_clk * 36) / 24;
3960 			break;
3961 		case COLOR_DEPTH_161616:
3962 			normalized_clk = (normalized_clk * 48) / 24;
3963 			break;
3964 		default:
3965 			/* The above depths are the only ones valid for HDMI. */
3966 			return false;
3967 		}
3968 		if (normalized_clk <= info->max_tmds_clock) {
3969 			timing_out->display_color_depth = depth;
3970 			return true;
3971 		}
3972 	} while (--depth > COLOR_DEPTH_666);
3973 	return false;
3974 }
3975 
3976 static void fill_stream_properties_from_drm_display_mode(
3977 	struct dc_stream_state *stream,
3978 	const struct drm_display_mode *mode_in,
3979 	const struct drm_connector *connector,
3980 	const struct drm_connector_state *connector_state,
3981 	const struct dc_stream_state *old_stream)
3982 {
3983 	struct dc_crtc_timing *timing_out = &stream->timing;
3984 	const struct drm_display_info *info = &connector->display_info;
3985 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3986 	struct hdmi_vendor_infoframe hv_frame;
3987 	struct hdmi_avi_infoframe avi_frame;
3988 
3989 	memset(&hv_frame, 0, sizeof(hv_frame));
3990 	memset(&avi_frame, 0, sizeof(avi_frame));
3991 
3992 	timing_out->h_border_left = 0;
3993 	timing_out->h_border_right = 0;
3994 	timing_out->v_border_top = 0;
3995 	timing_out->v_border_bottom = 0;
3996 	/* TODO: un-hardcode */
3997 	if (drm_mode_is_420_only(info, mode_in)
3998 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3999 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4000 	else if (drm_mode_is_420_also(info, mode_in)
4001 			&& aconnector->force_yuv420_output)
4002 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4003 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4004 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4005 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4006 	else
4007 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4008 
4009 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4010 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4011 		connector, connector_state,
4012 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4013 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4014 	timing_out->hdmi_vic = 0;
4015 
4016 	if (old_stream) {
4017 		timing_out->vic = old_stream->timing.vic;
4018 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4019 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4020 	} else {
4021 		timing_out->vic = drm_match_cea_mode(mode_in);
4022 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4023 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4024 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4025 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4026 	}
4027 
4028 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4029 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4030 		timing_out->vic = avi_frame.video_code;
4031 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4032 		timing_out->hdmi_vic = hv_frame.vic;
4033 	}
4034 
4035 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4036 	timing_out->h_total = mode_in->crtc_htotal;
4037 	timing_out->h_sync_width =
4038 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4039 	timing_out->h_front_porch =
4040 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4041 	timing_out->v_total = mode_in->crtc_vtotal;
4042 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4043 	timing_out->v_front_porch =
4044 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4045 	timing_out->v_sync_width =
4046 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4047 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4048 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4049 
4050 	stream->output_color_space = get_output_color_space(timing_out);
4051 
4052 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4053 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4054 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4055 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4056 		    drm_mode_is_420_also(info, mode_in) &&
4057 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4058 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4059 			adjust_colour_depth_from_display_info(timing_out, info);
4060 		}
4061 	}
4062 }
4063 
4064 static void fill_audio_info(struct audio_info *audio_info,
4065 			    const struct drm_connector *drm_connector,
4066 			    const struct dc_sink *dc_sink)
4067 {
4068 	int i = 0;
4069 	int cea_revision = 0;
4070 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4071 
4072 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4073 	audio_info->product_id = edid_caps->product_id;
4074 
4075 	cea_revision = drm_connector->display_info.cea_rev;
4076 
4077 	strscpy(audio_info->display_name,
4078 		edid_caps->display_name,
4079 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4080 
4081 	if (cea_revision >= 3) {
4082 		audio_info->mode_count = edid_caps->audio_mode_count;
4083 
4084 		for (i = 0; i < audio_info->mode_count; ++i) {
4085 			audio_info->modes[i].format_code =
4086 					(enum audio_format_code)
4087 					(edid_caps->audio_modes[i].format_code);
4088 			audio_info->modes[i].channel_count =
4089 					edid_caps->audio_modes[i].channel_count;
4090 			audio_info->modes[i].sample_rates.all =
4091 					edid_caps->audio_modes[i].sample_rate;
4092 			audio_info->modes[i].sample_size =
4093 					edid_caps->audio_modes[i].sample_size;
4094 		}
4095 	}
4096 
4097 	audio_info->flags.all = edid_caps->speaker_flags;
4098 
4099 	/* TODO: We only check the progressive mode; check the interlaced mode too */
4100 	if (drm_connector->latency_present[0]) {
4101 		audio_info->video_latency = drm_connector->video_latency[0];
4102 		audio_info->audio_latency = drm_connector->audio_latency[0];
4103 	}
4104 
4105 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4107 }
4108 
4109 static void
4110 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4111 				      struct drm_display_mode *dst_mode)
4112 {
4113 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4114 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4115 	dst_mode->crtc_clock = src_mode->crtc_clock;
4116 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4117 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4118 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4119 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4120 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4121 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4122 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4123 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4124 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4125 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4126 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4127 }
4128 
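/*
 * Reuse the native mode's CRTC timing when scaling is enabled, or when the
 * requested mode already matches the native clock and totals, so the
 * hardware keeps scanning out the panel's preferred timing.
 */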
4129 static void
4130 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4131 					const struct drm_display_mode *native_mode,
4132 					bool scale_enabled)
4133 {
4134 	if (scale_enabled) {
4135 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4136 	} else if (native_mode->clock == drm_mode->clock &&
4137 			native_mode->htotal == drm_mode->htotal &&
4138 			native_mode->vtotal == drm_mode->vtotal) {
4139 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4140 	} else {
4141 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4142 	}
4143 }
4144 
4145 static struct dc_sink *
4146 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4147 {
4148 	struct dc_sink_init_data sink_init_data = { 0 };
4149 	struct dc_sink *sink = NULL;
4150 	sink_init_data.link = aconnector->dc_link;
4151 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4152 
4153 	sink = dc_sink_create(&sink_init_data);
4154 	if (!sink) {
4155 		DRM_ERROR("Failed to create sink!\n");
4156 		return NULL;
4157 	}
4158 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4159 
4160 	return sink;
4161 }
4162 
4163 static void set_multisync_trigger_params(
4164 		struct dc_stream_state *stream)
4165 {
4166 	if (stream->triggered_crtc_reset.enabled) {
4167 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4168 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4169 	}
4170 }
4171 
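/*
 * Elect the synchronized stream with the highest refresh rate, computed as
 * pix_clk * 100 / (h_total * v_total) Hz, as the master; every stream in
 * the set then uses it as its CRTC-reset event source.
 */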
4172 static void set_master_stream(struct dc_stream_state *stream_set[],
4173 			      int stream_count)
4174 {
4175 	int j, highest_rfr = 0, master_stream = 0;
4176 
4177 	for (j = 0;  j < stream_count; j++) {
4178 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4179 			int refresh_rate = 0;
4180 
4181 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4182 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4183 			if (refresh_rate > highest_rfr) {
4184 				highest_rfr = refresh_rate;
4185 				master_stream = j;
4186 			}
4187 		}
4188 	}
4189 	for (j = 0;  j < stream_count; j++) {
4190 		if (stream_set[j])
4191 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4192 	}
4193 }
4194 
4195 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4196 {
4197 	int i = 0;
4198 
4199 	if (context->stream_count < 2)
4200 		return;
4201 	for (i = 0; i < context->stream_count ; i++) {
4202 		if (!context->streams[i])
4203 			continue;
4204 		/*
4205 		 * TODO: add a function to read AMD VSDB bits and set
4206 		 * crtc_sync_master.multi_sync_enabled flag.
4207 		 * For now it's set to false.
4208 		 */
4209 		set_multisync_trigger_params(context->streams[i]);
4210 	}
4211 	set_master_stream(context->streams, context->stream_count);
4212 }
4213 
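/*
 * Build a dc_stream_state for the connector: attach a (possibly fake)
 * sink, derive the timing from the requested or preferred mode, enable DSC
 * on DP when the sink supports it and the link bandwidth calls for it, and
 * fill in scaling, audio and PSR/VSC infopacket state.
 */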
4214 static struct dc_stream_state *
4215 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4216 		       const struct drm_display_mode *drm_mode,
4217 		       const struct dm_connector_state *dm_state,
4218 		       const struct dc_stream_state *old_stream)
4219 {
4220 	struct drm_display_mode *preferred_mode = NULL;
4221 	struct drm_connector *drm_connector;
4222 	const struct drm_connector_state *con_state =
4223 		dm_state ? &dm_state->base : NULL;
4224 	struct dc_stream_state *stream = NULL;
4225 	struct drm_display_mode mode = *drm_mode;
4226 	bool native_mode_found = false;
4227 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4228 	int mode_refresh;
4229 	int preferred_refresh = 0;
4230 #if defined(CONFIG_DRM_AMD_DC_DCN)
4231 	struct dsc_dec_dpcd_caps dsc_caps;
4232 #endif
4233 	uint32_t link_bandwidth_kbps;
4234 	struct dc_sink *sink = NULL;
4235 
4236 	if (aconnector == NULL) {
4237 		DRM_ERROR("aconnector is NULL!\n");
4238 		return stream;
4239 	}
4240 
4241 	drm_connector = &aconnector->base;
4242 
4243 	if (!aconnector->dc_sink) {
4244 		sink = create_fake_sink(aconnector);
4245 		if (!sink)
4246 			return stream;
4247 	} else {
4248 		sink = aconnector->dc_sink;
4249 		dc_sink_retain(sink);
4250 	}
4251 
4252 	stream = dc_create_stream_for_sink(sink);
4253 
4254 	if (stream == NULL) {
4255 		DRM_ERROR("Failed to create stream for sink!\n");
4256 		goto finish;
4257 	}
4258 
4259 	stream->dm_stream_context = aconnector;
4260 
4261 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4262 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4263 
4264 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4265 		/* Search for preferred mode */
4266 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4267 			native_mode_found = true;
4268 			break;
4269 		}
4270 	}
4271 	if (!native_mode_found)
4272 		preferred_mode = list_first_entry_or_null(
4273 				&aconnector->base.modes,
4274 				struct drm_display_mode,
4275 				head);
4276 
4277 	mode_refresh = drm_mode_vrefresh(&mode);
4278 
4279 	if (preferred_mode == NULL) {
4280 		/*
4281 		 * This may not be an error: the use case is when we have no
4282 		 * usermode calls to reset and set the mode upon hotplug. In
4283 		 * this case, we call set mode ourselves to restore the previous
4284 		 * mode, and the mode list may not be filled in yet.
4285 		 */
4286 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4287 	} else {
4288 		decide_crtc_timing_for_drm_display_mode(
4289 				&mode, preferred_mode,
4290 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4291 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4292 	}
4293 
4294 	if (!dm_state)
4295 		drm_mode_set_crtcinfo(&mode, 0);
4296 
4297 	/*
4298 	 * If scaling is enabled and the refresh rate didn't change,
4299 	 * we copy the VIC and polarities of the old timing.
4300 	 */
4301 	if (!scale || mode_refresh != preferred_refresh)
4302 		fill_stream_properties_from_drm_display_mode(stream,
4303 			&mode, &aconnector->base, con_state, NULL);
4304 	else
4305 		fill_stream_properties_from_drm_display_mode(stream,
4306 			&mode, &aconnector->base, con_state, old_stream);
4307 
4308 	stream->timing.flags.DSC = 0;
4309 
4310 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4311 #if defined(CONFIG_DRM_AMD_DC_DCN)
4312 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4313 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4314 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4315 				      &dsc_caps);
4316 #endif
4317 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4318 							     dc_link_get_link_cap(aconnector->dc_link));
4319 
4320 #if defined(CONFIG_DRM_AMD_DC_DCN)
4321 		if (dsc_caps.is_dsc_supported)
4322 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4323 						  &dsc_caps,
4324 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4325 						  link_bandwidth_kbps,
4326 						  &stream->timing,
4327 						  &stream->timing.dsc_cfg))
4328 				stream->timing.flags.DSC = 1;
4329 #endif
4330 	}
4331 
4332 	update_stream_scaling_settings(&mode, dm_state, stream);
4333 
4334 	fill_audio_info(
4335 		&stream->audio_info,
4336 		drm_connector,
4337 		sink);
4338 
4339 	update_stream_signal(stream, sink);
4340 
4341 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4342 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4343 	if (stream->link->psr_feature_enabled)	{
4344 		struct dc  *core_dc = stream->link->ctx->dc;
4345 
4346 		if (dc_is_dmcu_initialized(core_dc)) {
4347 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4348 
4349 			stream->psr_version = dmcu->dmcu_version.psr_version;
4350 
4351 			/*
4352 			 * Decide whether the stream supports VSC SDP colorimetry
4353 			 * before building the VSC info packet.
4354 			 */
4355 			stream->use_vsc_sdp_for_colorimetry = false;
4356 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4357 				stream->use_vsc_sdp_for_colorimetry =
4358 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4359 			} else {
4360 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4361 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4362 					stream->use_vsc_sdp_for_colorimetry = true;
4363 				}
4364 			}
4365 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4366 		}
4367 	}
4368 finish:
4369 	dc_sink_release(sink);
4370 
4371 	return stream;
4372 }
4373 
4374 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4375 {
4376 	drm_crtc_cleanup(crtc);
4377 	kfree(crtc);
4378 }
4379 
4380 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4381 				  struct drm_crtc_state *state)
4382 {
4383 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4384 
4385 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4386 	if (cur->stream)
4387 		dc_stream_release(cur->stream);
4388 
4390 	__drm_atomic_helper_crtc_destroy_state(state);
4391 
4393 	kfree(state);
4394 }
4395 
4396 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4397 {
4398 	struct dm_crtc_state *state;
4399 
4400 	if (crtc->state)
4401 		dm_crtc_destroy_state(crtc, crtc->state);
4402 
4403 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4404 	if (WARN_ON(!state))
4405 		return;
4406 
4407 	crtc->state = &state->base;
4408 	crtc->state->crtc = crtc;
4410 }
4411 
4412 static struct drm_crtc_state *
4413 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4414 {
4415 	struct dm_crtc_state *state, *cur;
4416 
4417 	if (WARN_ON(!crtc->state))
4418 		return NULL;
4419 
4420 	cur = to_dm_crtc_state(crtc->state);
4421 
4422 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4423 	if (!state)
4424 		return NULL;
4425 
4426 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4427 
4428 	if (cur->stream) {
4429 		state->stream = cur->stream;
4430 		dc_stream_retain(state->stream);
4431 	}
4432 
4433 	state->active_planes = cur->active_planes;
4434 	state->interrupts_enabled = cur->interrupts_enabled;
4435 	state->vrr_params = cur->vrr_params;
4436 	state->vrr_infopacket = cur->vrr_infopacket;
4437 	state->abm_level = cur->abm_level;
4438 	state->vrr_supported = cur->vrr_supported;
4439 	state->freesync_config = cur->freesync_config;
4440 	state->crc_src = cur->crc_src;
4441 	state->cm_has_degamma = cur->cm_has_degamma;
4442 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4443 
4444 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4445 
4446 	return &state->base;
4447 }
4448 
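/*
 * On pre-DCN hardware (family <= AI) the VUPDATE interrupt is used to
 * drive VRR; it is only needed while VRR is active, so dm_set_vblank()
 * below toggles it together with the VBLANK interrupt.
 */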
4449 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4450 {
4451 	enum dc_irq_source irq_source;
4452 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4453 	struct amdgpu_device *adev = crtc->dev->dev_private;
4454 	int rc;
4455 
4456 	/* Do not set vupdate for DCN hardware */
4457 	if (adev->family > AMDGPU_FAMILY_AI)
4458 		return 0;
4459 
4460 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4461 
4462 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4463 
4464 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4465 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4466 	return rc;
4467 }
4468 
4469 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4470 {
4471 	enum dc_irq_source irq_source;
4472 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4473 	struct amdgpu_device *adev = crtc->dev->dev_private;
4474 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4475 	int rc = 0;
4476 
4477 	if (enable) {
4478 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4479 		if (amdgpu_dm_vrr_active(acrtc_state))
4480 			rc = dm_set_vupdate_irq(crtc, true);
4481 	} else {
4482 		/* vblank irq off -> vupdate irq off */
4483 		rc = dm_set_vupdate_irq(crtc, false);
4484 	}
4485 
4486 	if (rc)
4487 		return rc;
4488 
4489 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4490 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4491 }
4492 
4493 static int dm_enable_vblank(struct drm_crtc *crtc)
4494 {
4495 	return dm_set_vblank(crtc, true);
4496 }
4497 
4498 static void dm_disable_vblank(struct drm_crtc *crtc)
4499 {
4500 	dm_set_vblank(crtc, false);
4501 }
4502 
4503 /* Only the options currently available for the driver are implemented */
4504 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4505 	.reset = dm_crtc_reset_state,
4506 	.destroy = amdgpu_dm_crtc_destroy,
4507 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4508 	.set_config = drm_atomic_helper_set_config,
4509 	.page_flip = drm_atomic_helper_page_flip,
4510 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4511 	.atomic_destroy_state = dm_crtc_destroy_state,
4512 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4513 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4514 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4515 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4516 	.enable_vblank = dm_enable_vblank,
4517 	.disable_vblank = dm_disable_vblank,
4518 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4519 };
4520 
4521 static enum drm_connector_status
4522 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4523 {
4524 	bool connected;
4525 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4526 
4527 	/*
4528 	 * Notes:
4529 	 * 1. This interface is NOT called in context of HPD irq.
4530 	 * 2. This interface *is* called in the context of a user-mode ioctl,
4531 	 * which makes it a bad place for *any* MST-related activity.
4532 	 */
4533 
4534 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4535 	    !aconnector->fake_enable)
4536 		connected = (aconnector->dc_sink != NULL);
4537 	else
4538 		connected = (aconnector->base.force == DRM_FORCE_ON);
4539 
4540 	return (connected ? connector_status_connected :
4541 			connector_status_disconnected);
4542 }
4543 
4544 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4545 					    struct drm_connector_state *connector_state,
4546 					    struct drm_property *property,
4547 					    uint64_t val)
4548 {
4549 	struct drm_device *dev = connector->dev;
4550 	struct amdgpu_device *adev = dev->dev_private;
4551 	struct dm_connector_state *dm_old_state =
4552 		to_dm_connector_state(connector->state);
4553 	struct dm_connector_state *dm_new_state =
4554 		to_dm_connector_state(connector_state);
4555 
4556 	int ret = -EINVAL;
4557 
4558 	if (property == dev->mode_config.scaling_mode_property) {
4559 		enum amdgpu_rmx_type rmx_type;
4560 
4561 		switch (val) {
4562 		case DRM_MODE_SCALE_CENTER:
4563 			rmx_type = RMX_CENTER;
4564 			break;
4565 		case DRM_MODE_SCALE_ASPECT:
4566 			rmx_type = RMX_ASPECT;
4567 			break;
4568 		case DRM_MODE_SCALE_FULLSCREEN:
4569 			rmx_type = RMX_FULL;
4570 			break;
4571 		case DRM_MODE_SCALE_NONE:
4572 		default:
4573 			rmx_type = RMX_OFF;
4574 			break;
4575 		}
4576 
4577 		if (dm_old_state->scaling == rmx_type)
4578 			return 0;
4579 
4580 		dm_new_state->scaling = rmx_type;
4581 		ret = 0;
4582 	} else if (property == adev->mode_info.underscan_hborder_property) {
4583 		dm_new_state->underscan_hborder = val;
4584 		ret = 0;
4585 	} else if (property == adev->mode_info.underscan_vborder_property) {
4586 		dm_new_state->underscan_vborder = val;
4587 		ret = 0;
4588 	} else if (property == adev->mode_info.underscan_property) {
4589 		dm_new_state->underscan_enable = val;
4590 		ret = 0;
4591 	} else if (property == adev->mode_info.abm_level_property) {
4592 		dm_new_state->abm_level = val;
4593 		ret = 0;
4594 	}
4595 
4596 	return ret;
4597 }
4598 
4599 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4600 					    const struct drm_connector_state *state,
4601 					    struct drm_property *property,
4602 					    uint64_t *val)
4603 {
4604 	struct drm_device *dev = connector->dev;
4605 	struct amdgpu_device *adev = dev->dev_private;
4606 	struct dm_connector_state *dm_state =
4607 		to_dm_connector_state(state);
4608 	int ret = -EINVAL;
4609 
4610 	if (property == dev->mode_config.scaling_mode_property) {
4611 		switch (dm_state->scaling) {
4612 		case RMX_CENTER:
4613 			*val = DRM_MODE_SCALE_CENTER;
4614 			break;
4615 		case RMX_ASPECT:
4616 			*val = DRM_MODE_SCALE_ASPECT;
4617 			break;
4618 		case RMX_FULL:
4619 			*val = DRM_MODE_SCALE_FULLSCREEN;
4620 			break;
4621 		case RMX_OFF:
4622 		default:
4623 			*val = DRM_MODE_SCALE_NONE;
4624 			break;
4625 		}
4626 		ret = 0;
4627 	} else if (property == adev->mode_info.underscan_hborder_property) {
4628 		*val = dm_state->underscan_hborder;
4629 		ret = 0;
4630 	} else if (property == adev->mode_info.underscan_vborder_property) {
4631 		*val = dm_state->underscan_vborder;
4632 		ret = 0;
4633 	} else if (property == adev->mode_info.underscan_property) {
4634 		*val = dm_state->underscan_enable;
4635 		ret = 0;
4636 	} else if (property == adev->mode_info.abm_level_property) {
4637 		*val = dm_state->abm_level;
4638 		ret = 0;
4639 	}
4640 
4641 	return ret;
4642 }
4643 
4644 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4645 {
4646 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4647 
4648 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4649 }
4650 
4651 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4652 {
4653 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4654 	const struct dc_link *link = aconnector->dc_link;
4655 	struct amdgpu_device *adev = connector->dev->dev_private;
4656 	struct amdgpu_display_manager *dm = &adev->dm;
4657 
4658 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4659 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4660 
4661 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4662 	    link->type != dc_connection_none &&
4663 	    dm->backlight_dev) {
4664 		backlight_device_unregister(dm->backlight_dev);
4665 		dm->backlight_dev = NULL;
4666 	}
4667 #endif
4668 
4669 	if (aconnector->dc_em_sink)
4670 		dc_sink_release(aconnector->dc_em_sink);
4671 	aconnector->dc_em_sink = NULL;
4672 	if (aconnector->dc_sink)
4673 		dc_sink_release(aconnector->dc_sink);
4674 	aconnector->dc_sink = NULL;
4675 
4676 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4677 	drm_connector_unregister(connector);
4678 	drm_connector_cleanup(connector);
4679 	if (aconnector->i2c) {
4680 		i2c_del_adapter(&aconnector->i2c->base);
4681 		kfree(aconnector->i2c);
4682 	}
4683 	kfree(aconnector->dm_dp_aux.aux.name);
4684 
4685 	kfree(connector);
4686 }
4687 
4688 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4689 {
4690 	struct dm_connector_state *state =
4691 		to_dm_connector_state(connector->state);
4692 
4693 	if (connector->state)
4694 		__drm_atomic_helper_connector_destroy_state(connector->state);
4695 
4696 	kfree(state);
4697 
4698 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4699 
4700 	if (state) {
4701 		state->scaling = RMX_OFF;
4702 		state->underscan_enable = false;
4703 		state->underscan_hborder = 0;
4704 		state->underscan_vborder = 0;
4705 		state->base.max_requested_bpc = 8;
4706 		state->vcpi_slots = 0;
4707 		state->pbn = 0;
4708 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4709 			state->abm_level = amdgpu_dm_abm_level;
4710 
4711 		__drm_atomic_helper_connector_reset(connector, &state->base);
4712 	}
4713 }
4714 
4715 struct drm_connector_state *
4716 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4717 {
4718 	struct dm_connector_state *state =
4719 		to_dm_connector_state(connector->state);
4720 
4721 	struct dm_connector_state *new_state =
4722 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4723 
4724 	if (!new_state)
4725 		return NULL;
4726 
4727 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4728 
4729 	new_state->freesync_capable = state->freesync_capable;
4730 	new_state->abm_level = state->abm_level;
4731 	new_state->scaling = state->scaling;
4732 	new_state->underscan_enable = state->underscan_enable;
4733 	new_state->underscan_hborder = state->underscan_hborder;
4734 	new_state->underscan_vborder = state->underscan_vborder;
4735 	new_state->vcpi_slots = state->vcpi_slots;
4736 	new_state->pbn = state->pbn;
4737 	return &new_state->base;
4738 }
4739 
4740 static int
4741 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4742 {
4743 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4744 		to_amdgpu_dm_connector(connector);
4745 	int r;
4746 
4747 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4748 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4749 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4750 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4751 		if (r)
4752 			return r;
4753 	}
4754 
4755 #if defined(CONFIG_DEBUG_FS)
4756 	connector_debugfs_init(amdgpu_dm_connector);
4757 #endif
4758 
4759 	return 0;
4760 }
4761 
4762 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4763 	.reset = amdgpu_dm_connector_funcs_reset,
4764 	.detect = amdgpu_dm_connector_detect,
4765 	.fill_modes = drm_helper_probe_single_connector_modes,
4766 	.destroy = amdgpu_dm_connector_destroy,
4767 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4768 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4769 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4770 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4771 	.late_register = amdgpu_dm_connector_late_register,
4772 	.early_unregister = amdgpu_dm_connector_unregister
4773 };
4774 
4775 static int get_modes(struct drm_connector *connector)
4776 {
4777 	return amdgpu_dm_connector_get_modes(connector);
4778 }
4779 
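/*
 * Create an emulated sink backed by the connector's EDID property blob so
 * that a forced-on connector can light up without a physically attached
 * sink providing one.
 */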
4780 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4781 {
4782 	struct dc_sink_init_data init_params = {
4783 			.link = aconnector->dc_link,
4784 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4785 	};
4786 	struct edid *edid;
4787 
4788 	if (!aconnector->base.edid_blob_ptr) {
4789 		DRM_ERROR("No EDID firmware found on connector %s, forcing to OFF!\n",
4790 				aconnector->base.name);
4791 
4792 		aconnector->base.force = DRM_FORCE_OFF;
4793 		aconnector->base.override_edid = false;
4794 		return;
4795 	}
4796 
4797 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4798 
4799 	aconnector->edid = edid;
4800 
4801 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4802 		aconnector->dc_link,
4803 		(uint8_t *)edid,
4804 		(edid->extensions + 1) * EDID_LENGTH,
4805 		&init_params);
4806 
4807 	if (aconnector->base.force == DRM_FORCE_ON) {
4808 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4809 				aconnector->dc_link->local_sink :
4810 				aconnector->dc_em_sink;
4811 		dc_sink_retain(aconnector->dc_sink);
4812 	}
4813 }
4814 
4815 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4816 {
4817 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4818 
4819 	/*
4820 	 * In the case of a headless boot with force-on for a DP managed
4821 	 * connector, these settings have to be != 0 to get an initial modeset.
4822 	 */
4823 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4824 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4825 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4826 	}
4827 
4829 	aconnector->base.override_edid = true;
4830 	create_eml_sink(aconnector);
4831 }
4832 
4833 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4834 				   struct drm_display_mode *mode)
4835 {
4836 	int result = MODE_ERROR;
4837 	struct dc_sink *dc_sink;
4838 	struct amdgpu_device *adev = connector->dev->dev_private;
4839 	/* TODO: Unhardcode stream count */
4840 	struct dc_stream_state *stream;
4841 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4842 	enum dc_status dc_result = DC_OK;
4843 
4844 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4845 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4846 		return result;
4847 
4848 	/*
4849 	 * Only run this the first time mode_valid is called, to initialize
4850 	 * EDID management.
4851 	 */
4852 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4853 		!aconnector->dc_em_sink)
4854 		handle_edid_mgmt(aconnector);
4855 
4856 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4857 
4858 	if (dc_sink == NULL) {
4859 		DRM_ERROR("dc_sink is NULL!\n");
4860 		goto fail;
4861 	}
4862 
4863 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4864 	if (stream == NULL) {
4865 		DRM_ERROR("Failed to create stream for sink!\n");
4866 		goto fail;
4867 	}
4868 
4869 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4870 
4871 	if (dc_result == DC_OK)
4872 		result = MODE_OK;
4873 	else
4874 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4875 			      mode->hdisplay,
4876 			      mode->vdisplay,
4877 			      mode->clock,
4878 			      dc_result);
4879 
4880 	dc_stream_release(stream);
4881 
4882 fail:
4883 	/* TODO: error handling */
4884 	return result;
4885 }
4886 
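/*
 * Pack the connector's HDR static metadata into the infopacket DC sends to
 * the sink: the CTA-861-G Dynamic Range and Mastering infoframe for HDMI,
 * or the same 26-byte payload re-wrapped in an SDP header for DP/eDP.
 */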
4887 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4888 				struct dc_info_packet *out)
4889 {
4890 	struct hdmi_drm_infoframe frame;
4891 	unsigned char buf[30]; /* 26 + 4 */
4892 	ssize_t len;
4893 	int ret, i;
4894 
4895 	memset(out, 0, sizeof(*out));
4896 
4897 	if (!state->hdr_output_metadata)
4898 		return 0;
4899 
4900 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4901 	if (ret)
4902 		return ret;
4903 
4904 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4905 	if (len < 0)
4906 		return (int)len;
4907 
4908 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4909 	if (len != 30)
4910 		return -EINVAL;
4911 
4912 	/* Prepare the infopacket for DC. */
4913 	switch (state->connector->connector_type) {
4914 	case DRM_MODE_CONNECTOR_HDMIA:
4915 		out->hb0 = 0x87; /* type */
4916 		out->hb1 = 0x01; /* version */
4917 		out->hb2 = 0x1A; /* length */
4918 		out->sb[0] = buf[3]; /* checksum */
4919 		i = 1;
4920 		break;
4921 
4922 	case DRM_MODE_CONNECTOR_DisplayPort:
4923 	case DRM_MODE_CONNECTOR_eDP:
4924 		out->hb0 = 0x00; /* sdp id, zero */
4925 		out->hb1 = 0x87; /* type */
4926 		out->hb2 = 0x1D; /* payload len - 1 */
4927 		out->hb3 = (0x13 << 2); /* sdp version */
4928 		out->sb[0] = 0x01; /* version */
4929 		out->sb[1] = 0x1A; /* length */
4930 		i = 2;
4931 		break;
4932 
4933 	default:
4934 		return -EINVAL;
4935 	}
4936 
4937 	memcpy(&out->sb[i], &buf[4], 26);
4938 	out->valid = true;
4939 
4940 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4941 		       sizeof(out->sb), false);
4942 
4943 	return 0;
4944 }
4945 
4946 static bool
4947 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4948 			  const struct drm_connector_state *new_state)
4949 {
4950 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4951 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4952 
4953 	if (old_blob != new_blob) {
4954 		if (old_blob && new_blob &&
4955 		    old_blob->length == new_blob->length)
4956 			return memcmp(old_blob->data, new_blob->data,
4957 				      old_blob->length);
4958 
4959 		return true;
4960 	}
4961 
4962 	return false;
4963 }
4964 
4965 static int
4966 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4967 				 struct drm_atomic_state *state)
4968 {
4969 	struct drm_connector_state *new_con_state =
4970 		drm_atomic_get_new_connector_state(state, conn);
4971 	struct drm_connector_state *old_con_state =
4972 		drm_atomic_get_old_connector_state(state, conn);
4973 	struct drm_crtc *crtc = new_con_state->crtc;
4974 	struct drm_crtc_state *new_crtc_state;
4975 	int ret;
4976 
4977 	if (!crtc)
4978 		return 0;
4979 
4980 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4981 		struct dc_info_packet hdr_infopacket;
4982 
4983 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4984 		if (ret)
4985 			return ret;
4986 
4987 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4988 		if (IS_ERR(new_crtc_state))
4989 			return PTR_ERR(new_crtc_state);
4990 
4991 		/*
4992 		 * DC considers the stream backends changed if the
4993 		 * static metadata changes. Forcing the modeset also
4994 		 * gives a simple way for userspace to switch from
4995 		 * 8bpc to 10bpc when setting the metadata to enter
4996 		 * or exit HDR.
4997 		 *
4998 		 * Changing the static metadata after it's been
4999 		 * set is permissible, however. So only force a
5000 		 * modeset if we're entering or exiting HDR.
5001 		 */
5002 		new_crtc_state->mode_changed =
5003 			!old_con_state->hdr_output_metadata ||
5004 			!new_con_state->hdr_output_metadata;
5005 	}
5006 
5007 	return 0;
5008 }
5009 
5010 static const struct drm_connector_helper_funcs
5011 amdgpu_dm_connector_helper_funcs = {
5012 	/*
5013 	 * If hotplugging a second, bigger display in FB console mode, its
5014 	 * higher-resolution modes will be filtered out by drm_mode_validate_size()
5015 	 * and be missing after the user starts lightdm. So we need to renew the
5016 	 * modes list in the get_modes callback, not just return the modes count.
5017 	 */
5018 	.get_modes = get_modes,
5019 	.mode_valid = amdgpu_dm_connector_mode_valid,
5020 	.atomic_check = amdgpu_dm_connector_atomic_check,
5021 };
5022 
5023 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5024 {
5025 }
5026 
5027 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5028 {
5029 	struct drm_device *dev = new_crtc_state->crtc->dev;
5030 	struct drm_plane *plane;
5031 
5032 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5033 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5034 			return true;
5035 	}
5036 
5037 	return false;
5038 }
5039 
5040 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5041 {
5042 	struct drm_atomic_state *state = new_crtc_state->state;
5043 	struct drm_plane *plane;
5044 	int num_active = 0;
5045 
5046 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5047 		struct drm_plane_state *new_plane_state;
5048 
5049 		/* Cursor planes are "fake". */
5050 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5051 			continue;
5052 
5053 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5054 
5055 		if (!new_plane_state) {
5056 			/*
5057 			 * The plane is enabled on the CRTC and hasn't changed
5058 			 * state. This means that it previously passed
5059 			 * validation and is therefore enabled.
5060 			 */
5061 			num_active += 1;
5062 			continue;
5063 		}
5064 
5065 		/* We need a framebuffer to be considered enabled. */
5066 		num_active += (new_plane_state->fb != NULL);
5067 	}
5068 
5069 	return num_active;
5070 }
5071 
5072 /*
5073  * Sets whether interrupts should be enabled on a specific CRTC.
5074  * We require that the stream be enabled and that there exist active
5075  * DC planes on the stream.
5076  */
5077 static void
5078 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5079 			       struct drm_crtc_state *new_crtc_state)
5080 {
5081 	struct dm_crtc_state *dm_new_crtc_state =
5082 		to_dm_crtc_state(new_crtc_state);
5083 
5084 	dm_new_crtc_state->active_planes = 0;
5085 	dm_new_crtc_state->interrupts_enabled = false;
5086 
5087 	if (!dm_new_crtc_state->stream)
5088 		return;
5089 
5090 	dm_new_crtc_state->active_planes =
5091 		count_crtc_active_planes(new_crtc_state);
5092 
5093 	dm_new_crtc_state->interrupts_enabled =
5094 		dm_new_crtc_state->active_planes > 0;
5095 }
5096 
5097 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5098 				       struct drm_crtc_state *state)
5099 {
5100 	struct amdgpu_device *adev = crtc->dev->dev_private;
5101 	struct dc *dc = adev->dm.dc;
5102 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5103 	int ret = -EINVAL;
5104 
5105 	/*
5106 	 * Update interrupt state for the CRTC. This needs to happen whenever
5107 	 * the CRTC has changed or whenever any of its planes have changed.
5108 	 * Atomic check satisfies both of these requirements since the CRTC
5109 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5110 	 */
5111 	dm_update_crtc_interrupt_state(crtc, state);
5112 
5113 	if (unlikely(!dm_crtc_state->stream &&
5114 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5115 		WARN_ON(1);
5116 		return ret;
5117 	}
5118 
5119 	/* In some use cases, like reset, no stream is attached */
5120 	if (!dm_crtc_state->stream)
5121 		return 0;
5122 
5123 	/*
5124 	 * We want at least one hardware plane enabled to use
5125 	 * the stream with a cursor enabled.
5126 	 */
5127 	if (state->enable && state->active &&
5128 	    does_crtc_have_active_cursor(state) &&
5129 	    dm_crtc_state->active_planes == 0)
5130 		return -EINVAL;
5131 
5132 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5133 		return 0;
5134 
5135 	return ret;
5136 }
5137 
5138 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5139 				      const struct drm_display_mode *mode,
5140 				      struct drm_display_mode *adjusted_mode)
5141 {
5142 	return true;
5143 }
5144 
5145 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5146 	.disable = dm_crtc_helper_disable,
5147 	.atomic_check = dm_crtc_helper_atomic_check,
5148 	.mode_fixup = dm_crtc_helper_mode_fixup,
5149 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5150 };
5151 
5152 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5153 {
5155 }
5156 
5157 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5158 {
5159 	switch (display_color_depth) {
5160 	case COLOR_DEPTH_666:
5161 		return 6;
5162 	case COLOR_DEPTH_888:
5163 		return 8;
5164 	case COLOR_DEPTH_101010:
5165 		return 10;
5166 	case COLOR_DEPTH_121212:
5167 		return 12;
5168 	case COLOR_DEPTH_141414:
5169 		return 14;
5170 	case COLOR_DEPTH_161616:
5171 		return 16;
5172 	default:
5173 		break;
5174 	}
5175 	return 0;
5176 }
5177 
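/*
 * For MST connectors, compute the payload bandwidth number (PBN) that the
 * adjusted mode requires at the negotiated color depth and reserve the
 * matching number of VCPI slots in the MST topology during atomic check.
 */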
5178 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5179 					  struct drm_crtc_state *crtc_state,
5180 					  struct drm_connector_state *conn_state)
5181 {
5182 	struct drm_atomic_state *state = crtc_state->state;
5183 	struct drm_connector *connector = conn_state->connector;
5184 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5185 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5186 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5187 	struct drm_dp_mst_topology_mgr *mst_mgr;
5188 	struct drm_dp_mst_port *mst_port;
5189 	enum dc_color_depth color_depth;
5190 	int clock, bpp = 0;
5191 	bool is_y420 = false;
5192 
5193 	if (!aconnector->port || !aconnector->dc_sink)
5194 		return 0;
5195 
5196 	mst_port = aconnector->port;
5197 	mst_mgr = &aconnector->mst_port->mst_mgr;
5198 
5199 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5200 		return 0;
5201 
5202 	if (!state->duplicated) {
5203 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5204 				aconnector->force_yuv420_output;
5205 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5206 								    is_y420);
5207 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5208 		clock = adjusted_mode->clock;
5209 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5210 	}
5211 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5212 									   mst_mgr,
5213 									   mst_port,
5214 									   dm_new_connector_state->pbn,
5215 									   0);
5216 	if (dm_new_connector_state->vcpi_slots < 0) {
5217 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5218 		return dm_new_connector_state->vcpi_slots;
5219 	}
5220 	return 0;
5221 }
5222 
5223 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5224 	.disable = dm_encoder_helper_disable,
5225 	.atomic_check = dm_encoder_helper_atomic_check
5226 };
5227 
5228 #if defined(CONFIG_DRM_AMD_DC_DCN)
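/*
 * Walk the connectors in the atomic state and, for each MST port,
 * enable or disable DSC to match the DC stream timing, recomputing the
 * PBN and VCPI slot allocation for DSC-enabled streams.
 */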
5229 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5230 					    struct dc_state *dc_state)
5231 {
5232 	struct dc_stream_state *stream = NULL;
5233 	struct drm_connector *connector;
5234 	struct drm_connector_state *new_con_state, *old_con_state;
5235 	struct amdgpu_dm_connector *aconnector;
5236 	struct dm_connector_state *dm_conn_state;
5237 	int i, j, clock, bpp;
5238 	int vcpi, pbn_div, pbn = 0;
5239 
5240 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5241 
5242 		aconnector = to_amdgpu_dm_connector(connector);
5243 
5244 		if (!aconnector->port)
5245 			continue;
5246 
5247 		if (!new_con_state || !new_con_state->crtc)
5248 			continue;
5249 
5250 		dm_conn_state = to_dm_connector_state(new_con_state);
5251 
5252 		for (j = 0; j < dc_state->stream_count; j++) {
5253 			stream = dc_state->streams[j];
5254 			if (!stream)
5255 				continue;
5256 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5258 				break;
5259 
5260 			stream = NULL;
5261 		}
5262 
5263 		if (!stream)
5264 			continue;
5265 
5266 		if (stream->timing.flags.DSC != 1) {
5267 			drm_dp_mst_atomic_enable_dsc(state,
5268 						     aconnector->port,
5269 						     dm_conn_state->pbn,
5270 						     0,
5271 						     false);
5272 			continue;
5273 		}
5274 
5275 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5276 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5277 		clock = stream->timing.pix_clk_100hz / 10;
5278 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5279 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5280 						    aconnector->port,
5281 						    pbn, pbn_div,
5282 						    true);
5283 		if (vcpi < 0)
5284 			return vcpi;
5285 
5286 		dm_conn_state->pbn = pbn;
5287 		dm_conn_state->vcpi_slots = vcpi;
5288 	}
5289 	return 0;
5290 }
5291 #endif
5292 
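/* Release any existing plane state and allocate fresh default state. */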
5293 static void dm_drm_plane_reset(struct drm_plane *plane)
5294 {
5295 	struct dm_plane_state *amdgpu_state = NULL;
5296 
5297 	if (plane->state)
5298 		plane->funcs->atomic_destroy_state(plane, plane->state);
5299 
5300 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5301 	WARN_ON(amdgpu_state == NULL);
5302 
5303 	if (amdgpu_state)
5304 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5305 }
5306 
5307 static struct drm_plane_state *
5308 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5309 {
5310 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5311 
5312 	old_dm_plane_state = to_dm_plane_state(plane->state);
5313 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5314 	if (!dm_plane_state)
5315 		return NULL;
5316 
5317 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5318 
5319 	if (old_dm_plane_state->dc_state) {
5320 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5321 		dc_plane_state_retain(dm_plane_state->dc_state);
5322 	}
5323 
5324 	return &dm_plane_state->base;
5325 }
5326 
5327 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5328 				struct drm_plane_state *state)
5329 {
5330 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5331 
5332 	if (dm_plane_state->dc_state)
5333 		dc_plane_state_release(dm_plane_state->dc_state);
5334 
5335 	drm_atomic_helper_plane_destroy_state(plane, state);
5336 }
5337 
5338 static const struct drm_plane_funcs dm_plane_funcs = {
5339 	.update_plane	= drm_atomic_helper_update_plane,
5340 	.disable_plane	= drm_atomic_helper_disable_plane,
5341 	.destroy	= drm_primary_helper_destroy,
5342 	.reset = dm_drm_plane_reset,
5343 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5344 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5345 };
5346 
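/*
 * Pin the new framebuffer's BO in a scanout-capable domain and map it
 * into GART so DC can program the plane address; tiling flags are read
 * back and, if the plane's dc_state changed, its buffer attributes are
 * refreshed.
 */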
5347 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5348 				      struct drm_plane_state *new_state)
5349 {
5350 	struct amdgpu_framebuffer *afb;
5351 	struct drm_gem_object *obj;
5352 	struct amdgpu_device *adev;
5353 	struct amdgpu_bo *rbo;
5354 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5355 	struct list_head list;
5356 	struct ttm_validate_buffer tv;
5357 	struct ww_acquire_ctx ticket;
5358 	uint64_t tiling_flags;
5359 	uint32_t domain;
5360 	int r;
5361 	bool force_disable_dcc = false;
5362 
5363 	dm_plane_state_old = to_dm_plane_state(plane->state);
5364 	dm_plane_state_new = to_dm_plane_state(new_state);
5365 
5366 	if (!new_state->fb) {
5367 		DRM_DEBUG_DRIVER("No FB bound\n");
5368 		return 0;
5369 	}
5370 
5371 	afb = to_amdgpu_framebuffer(new_state->fb);
5372 	obj = new_state->fb->obj[0];
5373 	rbo = gem_to_amdgpu_bo(obj);
5374 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5375 	INIT_LIST_HEAD(&list);
5376 
5377 	tv.bo = &rbo->tbo;
5378 	tv.num_shared = 1;
5379 	list_add(&tv.head, &list);
5380 
5381 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5382 	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
5384 		return r;
5385 	}
5386 
5387 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5388 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5389 	else
5390 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5391 
5392 	r = amdgpu_bo_pin(rbo, domain);
5393 	if (unlikely(r != 0)) {
5394 		if (r != -ERESTARTSYS)
5395 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5396 		ttm_eu_backoff_reservation(&ticket, &list);
5397 		return r;
5398 	}
5399 
5400 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5401 	if (unlikely(r != 0)) {
5402 		amdgpu_bo_unpin(rbo);
5403 		ttm_eu_backoff_reservation(&ticket, &list);
5404 		DRM_ERROR("%p bind failed\n", rbo);
5405 		return r;
5406 	}
5407 
5408 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5409 
5410 	ttm_eu_backoff_reservation(&ticket, &list);
5411 
5412 	afb->address = amdgpu_bo_gpu_offset(rbo);
5413 
5414 	amdgpu_bo_ref(rbo);
5415 
5416 	if (dm_plane_state_new->dc_state &&
5417 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5418 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5419 
5420 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5421 		fill_plane_buffer_attributes(
5422 			adev, afb, plane_state->format, plane_state->rotation,
5423 			tiling_flags, &plane_state->tiling_info,
5424 			&plane_state->plane_size, &plane_state->dcc,
5425 			&plane_state->address,
5426 			force_disable_dcc);
5427 	}
5428 
5429 	return 0;
5430 }
5431 
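/* Undo prepare_fb: unpin the BO and drop the reference taken on it. */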
5432 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5433 				       struct drm_plane_state *old_state)
5434 {
5435 	struct amdgpu_bo *rbo;
5436 	int r;
5437 
5438 	if (!old_state->fb)
5439 		return;
5440 
5441 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5442 	r = amdgpu_bo_reserve(rbo, false);
5443 	if (unlikely(r)) {
5444 		DRM_ERROR("failed to reserve rbo before unpin\n");
5445 		return;
5446 	}
5447 
5448 	amdgpu_bo_unpin(rbo);
5449 	amdgpu_bo_unreserve(rbo);
5450 	amdgpu_bo_unref(&rbo);
5451 }
5452 
5453 static int dm_plane_atomic_check(struct drm_plane *plane,
5454 				 struct drm_plane_state *state)
5455 {
5456 	struct amdgpu_device *adev = plane->dev->dev_private;
5457 	struct dc *dc = adev->dm.dc;
5458 	struct dm_plane_state *dm_plane_state;
5459 	struct dc_scaling_info scaling_info;
5460 	int ret;
5461 
5462 	dm_plane_state = to_dm_plane_state(state);
5463 
5464 	if (!dm_plane_state->dc_state)
5465 		return 0;
5466 
5467 	ret = fill_dc_scaling_info(state, &scaling_info);
5468 	if (ret)
5469 		return ret;
5470 
5471 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5472 		return 0;
5473 
5474 	return -EINVAL;
5475 }
5476 
5477 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5478 				       struct drm_plane_state *new_plane_state)
5479 {
5480 	/* Only support async updates on cursor planes. */
5481 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5482 		return -EINVAL;
5483 
5484 	return 0;
5485 }
5486 
5487 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5488 					 struct drm_plane_state *new_state)
5489 {
5490 	struct drm_plane_state *old_state =
5491 		drm_atomic_get_old_plane_state(new_state->state, plane);
5492 
5493 	swap(plane->state->fb, new_state->fb);
5494 
5495 	plane->state->src_x = new_state->src_x;
5496 	plane->state->src_y = new_state->src_y;
5497 	plane->state->src_w = new_state->src_w;
5498 	plane->state->src_h = new_state->src_h;
5499 	plane->state->crtc_x = new_state->crtc_x;
5500 	plane->state->crtc_y = new_state->crtc_y;
5501 	plane->state->crtc_w = new_state->crtc_w;
5502 	plane->state->crtc_h = new_state->crtc_h;
5503 
5504 	handle_cursor_update(plane, old_state);
5505 }
5506 
5507 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5508 	.prepare_fb = dm_plane_helper_prepare_fb,
5509 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5510 	.atomic_check = dm_plane_atomic_check,
5511 	.atomic_async_check = dm_plane_atomic_async_check,
5512 	.atomic_async_update = dm_plane_atomic_async_update
5513 };
5514 
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the
 * internal DRM check will succeed, and let DC implement the proper check
 */
5521 static const uint32_t rgb_formats[] = {
5522 	DRM_FORMAT_XRGB8888,
5523 	DRM_FORMAT_ARGB8888,
5524 	DRM_FORMAT_RGBA8888,
5525 	DRM_FORMAT_XRGB2101010,
5526 	DRM_FORMAT_XBGR2101010,
5527 	DRM_FORMAT_ARGB2101010,
5528 	DRM_FORMAT_ABGR2101010,
5529 	DRM_FORMAT_XBGR8888,
5530 	DRM_FORMAT_ABGR8888,
5531 	DRM_FORMAT_RGB565,
5532 };
5533 
5534 static const uint32_t overlay_formats[] = {
5535 	DRM_FORMAT_XRGB8888,
5536 	DRM_FORMAT_ARGB8888,
5537 	DRM_FORMAT_RGBA8888,
5538 	DRM_FORMAT_XBGR8888,
5539 	DRM_FORMAT_ABGR8888,
5540 	DRM_FORMAT_RGB565
5541 };
5542 
5543 static const u32 cursor_formats[] = {
5544 	DRM_FORMAT_ARGB8888
5545 };
5546 
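/*
 * Fill @formats with up to @max_formats pixel formats supported by the
 * given plane type; returns the number of formats written.
 */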
5547 static int get_plane_formats(const struct drm_plane *plane,
5548 			     const struct dc_plane_cap *plane_cap,
5549 			     uint32_t *formats, int max_formats)
5550 {
5551 	int i, num_formats = 0;
5552 
5553 	/*
5554 	 * TODO: Query support for each group of formats directly from
5555 	 * DC plane caps. This will require adding more formats to the
5556 	 * caps list.
5557 	 */
5558 
5559 	switch (plane->type) {
5560 	case DRM_PLANE_TYPE_PRIMARY:
5561 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5562 			if (num_formats >= max_formats)
5563 				break;
5564 
5565 			formats[num_formats++] = rgb_formats[i];
5566 		}
5567 
5568 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5569 			formats[num_formats++] = DRM_FORMAT_NV12;
5570 		if (plane_cap && plane_cap->pixel_format_support.p010)
5571 			formats[num_formats++] = DRM_FORMAT_P010;
5572 		break;
5573 
5574 	case DRM_PLANE_TYPE_OVERLAY:
5575 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5576 			if (num_formats >= max_formats)
5577 				break;
5578 
5579 			formats[num_formats++] = overlay_formats[i];
5580 		}
5581 		break;
5582 
5583 	case DRM_PLANE_TYPE_CURSOR:
5584 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5585 			if (num_formats >= max_formats)
5586 				break;
5587 
5588 			formats[num_formats++] = cursor_formats[i];
5589 		}
5590 		break;
5591 	}
5592 
5593 	return num_formats;
5594 }
5595 
5596 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5597 				struct drm_plane *plane,
5598 				unsigned long possible_crtcs,
5599 				const struct dc_plane_cap *plane_cap)
5600 {
5601 	uint32_t formats[32];
5602 	int num_formats;
5603 	int res = -EPERM;
5604 
5605 	num_formats = get_plane_formats(plane, plane_cap, formats,
5606 					ARRAY_SIZE(formats));
5607 
5608 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5609 				       &dm_plane_funcs, formats, num_formats,
5610 				       NULL, plane->type, NULL);
5611 	if (res)
5612 		return res;
5613 
5614 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5615 	    plane_cap && plane_cap->per_pixel_alpha) {
5616 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5617 					  BIT(DRM_MODE_BLEND_PREMULTI);
5618 
5619 		drm_plane_create_alpha_property(plane);
5620 		drm_plane_create_blend_mode_property(plane, blend_caps);
5621 	}
5622 
5623 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5624 	    plane_cap &&
5625 	    (plane_cap->pixel_format_support.nv12 ||
5626 	     plane_cap->pixel_format_support.p010)) {
5627 		/* This only affects YUV formats. */
5628 		drm_plane_create_color_properties(
5629 			plane,
5630 			BIT(DRM_COLOR_YCBCR_BT601) |
5631 			BIT(DRM_COLOR_YCBCR_BT709) |
5632 			BIT(DRM_COLOR_YCBCR_BT2020),
5633 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5634 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5635 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5636 	}
5637 
5638 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5639 
5640 	/* Create (reset) the plane state */
5641 	if (plane->funcs->reset)
5642 		plane->funcs->reset(plane);
5643 
5644 	return 0;
5645 }
5646 
5647 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5648 			       struct drm_plane *plane,
5649 			       uint32_t crtc_index)
5650 {
5651 	struct amdgpu_crtc *acrtc = NULL;
5652 	struct drm_plane *cursor_plane;
5653 
5654 	int res = -ENOMEM;
5655 
5656 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5657 	if (!cursor_plane)
5658 		goto fail;
5659 
5660 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
5662 
5663 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5664 	if (!acrtc)
5665 		goto fail;
5666 
5667 	res = drm_crtc_init_with_planes(
5668 			dm->ddev,
5669 			&acrtc->base,
5670 			plane,
5671 			cursor_plane,
5672 			&amdgpu_dm_crtc_funcs, NULL);
5673 
5674 	if (res)
5675 		goto fail;
5676 
5677 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5678 
	/* Create (reset) the CRTC state */
5680 	if (acrtc->base.funcs->reset)
5681 		acrtc->base.funcs->reset(&acrtc->base);
5682 
5683 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5684 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5685 
5686 	acrtc->crtc_id = crtc_index;
5687 	acrtc->base.enabled = false;
5688 	acrtc->otg_inst = -1;
5689 
5690 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5691 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5692 				   true, MAX_COLOR_LUT_ENTRIES);
5693 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5694 
5695 	return 0;
5696 
5697 fail:
5698 	kfree(acrtc);
5699 	kfree(cursor_plane);
5700 	return res;
5701 }
5702 
5703 
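/* Map a DC signal type onto the corresponding DRM connector type. */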
5704 static int to_drm_connector_type(enum signal_type st)
5705 {
5706 	switch (st) {
5707 	case SIGNAL_TYPE_HDMI_TYPE_A:
5708 		return DRM_MODE_CONNECTOR_HDMIA;
5709 	case SIGNAL_TYPE_EDP:
5710 		return DRM_MODE_CONNECTOR_eDP;
5711 	case SIGNAL_TYPE_LVDS:
5712 		return DRM_MODE_CONNECTOR_LVDS;
5713 	case SIGNAL_TYPE_RGB:
5714 		return DRM_MODE_CONNECTOR_VGA;
5715 	case SIGNAL_TYPE_DISPLAY_PORT:
5716 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5717 		return DRM_MODE_CONNECTOR_DisplayPort;
5718 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5719 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5720 		return DRM_MODE_CONNECTOR_DVID;
5721 	case SIGNAL_TYPE_VIRTUAL:
5722 		return DRM_MODE_CONNECTOR_VIRTUAL;
5723 
5724 	default:
5725 		return DRM_MODE_CONNECTOR_Unknown;
5726 	}
5727 }
5728 
5729 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5730 {
5731 	struct drm_encoder *encoder;
5732 
5733 	/* There is only one encoder per connector */
5734 	drm_connector_for_each_possible_encoder(connector, encoder)
5735 		return encoder;
5736 
5737 	return NULL;
5738 }
5739 
5740 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5741 {
5742 	struct drm_encoder *encoder;
5743 	struct amdgpu_encoder *amdgpu_encoder;
5744 
5745 	encoder = amdgpu_dm_connector_to_encoder(connector);
5746 
5747 	if (encoder == NULL)
5748 		return;
5749 
5750 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5751 
5752 	amdgpu_encoder->native_mode.clock = 0;
5753 
5754 	if (!list_empty(&connector->probed_modes)) {
5755 		struct drm_display_mode *preferred_mode = NULL;
5756 
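		/*
		 * Only the first probed mode is inspected below; the caller
		 * sorts probed_modes so a preferred mode, if any, comes first.
		 */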
5757 		list_for_each_entry(preferred_mode,
5758 				    &connector->probed_modes,
5759 				    head) {
5760 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5761 				amdgpu_encoder->native_mode = *preferred_mode;
5762 
5763 			break;
5764 		}
5765 
5766 	}
5767 }
5768 
5769 static struct drm_display_mode *
5770 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5771 			     char *name,
5772 			     int hdisplay, int vdisplay)
5773 {
5774 	struct drm_device *dev = encoder->dev;
5775 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5776 	struct drm_display_mode *mode = NULL;
5777 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5778 
5779 	mode = drm_mode_duplicate(dev, native_mode);
5780 
5781 	if (mode == NULL)
5782 		return NULL;
5783 
5784 	mode->hdisplay = hdisplay;
5785 	mode->vdisplay = vdisplay;
5786 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5787 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5788 
	return mode;
}
5792 
5793 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5794 						 struct drm_connector *connector)
5795 {
5796 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5797 	struct drm_display_mode *mode = NULL;
5798 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5799 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5800 				to_amdgpu_dm_connector(connector);
5801 	int i;
5802 	int n;
5803 	struct mode_size {
5804 		char name[DRM_DISPLAY_MODE_LEN];
5805 		int w;
5806 		int h;
5807 	} common_modes[] = {
5808 		{  "640x480",  640,  480},
5809 		{  "800x600",  800,  600},
5810 		{ "1024x768", 1024,  768},
5811 		{ "1280x720", 1280,  720},
5812 		{ "1280x800", 1280,  800},
5813 		{"1280x1024", 1280, 1024},
5814 		{ "1440x900", 1440,  900},
5815 		{"1680x1050", 1680, 1050},
5816 		{"1600x1200", 1600, 1200},
5817 		{"1920x1080", 1920, 1080},
5818 		{"1920x1200", 1920, 1200}
5819 	};
5820 
5821 	n = ARRAY_SIZE(common_modes);
5822 
5823 	for (i = 0; i < n; i++) {
5824 		struct drm_display_mode *curmode = NULL;
5825 		bool mode_existed = false;
5826 
5827 		if (common_modes[i].w > native_mode->hdisplay ||
5828 		    common_modes[i].h > native_mode->vdisplay ||
5829 		   (common_modes[i].w == native_mode->hdisplay &&
5830 		    common_modes[i].h == native_mode->vdisplay))
5831 			continue;
5832 
5833 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5834 			if (common_modes[i].w == curmode->hdisplay &&
5835 			    common_modes[i].h == curmode->vdisplay) {
5836 				mode_existed = true;
5837 				break;
5838 			}
5839 		}
5840 
5841 		if (mode_existed)
5842 			continue;
5843 
5844 		mode = amdgpu_dm_create_common_mode(encoder,
5845 				common_modes[i].name, common_modes[i].w,
5846 				common_modes[i].h);
		/* amdgpu_dm_create_common_mode() returns NULL on allocation failure */
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
5849 	}
5850 }
5851 
5852 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5853 					      struct edid *edid)
5854 {
5855 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5856 			to_amdgpu_dm_connector(connector);
5857 
5858 	if (edid) {
5859 		/* empty probed_modes */
5860 		INIT_LIST_HEAD(&connector->probed_modes);
5861 		amdgpu_dm_connector->num_modes =
5862 				drm_add_edid_modes(connector, edid);
5863 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be of higher, preferred
		 * resolution. For example, a 3840x2160 preferred timing
		 * in the base EDID and a 4096x2160 preferred resolution
		 * in a DID extension block later.
		 */
5872 		drm_mode_sort(&connector->probed_modes);
5873 		amdgpu_dm_get_native_mode(connector);
5874 	} else {
5875 		amdgpu_dm_connector->num_modes = 0;
5876 	}
5877 }
5878 
5879 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5880 {
5881 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5882 			to_amdgpu_dm_connector(connector);
5883 	struct drm_encoder *encoder;
5884 	struct edid *edid = amdgpu_dm_connector->edid;
5885 
5886 	encoder = amdgpu_dm_connector_to_encoder(connector);
5887 
5888 	if (!edid || !drm_edid_is_valid(edid)) {
5889 		amdgpu_dm_connector->num_modes =
5890 				drm_add_modes_noedid(connector, 640, 480);
5891 	} else {
5892 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5893 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5894 	}
5895 	amdgpu_dm_fbc_init(connector);
5896 
5897 	return amdgpu_dm_connector->num_modes;
5898 }
5899 
5900 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5901 				     struct amdgpu_dm_connector *aconnector,
5902 				     int connector_type,
5903 				     struct dc_link *link,
5904 				     int link_index)
5905 {
5906 	struct amdgpu_device *adev = dm->ddev->dev_private;
5907 
5908 	/*
5909 	 * Some of the properties below require access to state, like bpc.
5910 	 * Allocate some default initial connector state with our reset helper.
5911 	 */
5912 	if (aconnector->base.funcs->reset)
5913 		aconnector->base.funcs->reset(&aconnector->base);
5914 
5915 	aconnector->connector_id = link_index;
5916 	aconnector->dc_link = link;
5917 	aconnector->base.interlace_allowed = false;
5918 	aconnector->base.doublescan_allowed = false;
5919 	aconnector->base.stereo_allowed = false;
5920 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5921 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5922 	aconnector->audio_inst = -1;
5923 	mutex_init(&aconnector->hpd_lock);
5924 
	/*
	 * Configure HPD hot-plug support. The default value of
	 * connector->polled is 0, which means HPD hot plug is not supported.
	 */
5929 	switch (connector_type) {
5930 	case DRM_MODE_CONNECTOR_HDMIA:
5931 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5932 		aconnector->base.ycbcr_420_allowed =
5933 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5934 		break;
5935 	case DRM_MODE_CONNECTOR_DisplayPort:
5936 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5937 		aconnector->base.ycbcr_420_allowed =
5938 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
5939 		break;
5940 	case DRM_MODE_CONNECTOR_DVID:
5941 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5942 		break;
5943 	default:
5944 		break;
5945 	}
5946 
5947 	drm_object_attach_property(&aconnector->base.base,
5948 				dm->ddev->mode_config.scaling_mode_property,
5949 				DRM_MODE_SCALE_NONE);
5950 
5951 	drm_object_attach_property(&aconnector->base.base,
5952 				adev->mode_info.underscan_property,
5953 				UNDERSCAN_OFF);
5954 	drm_object_attach_property(&aconnector->base.base,
5955 				adev->mode_info.underscan_hborder_property,
5956 				0);
5957 	drm_object_attach_property(&aconnector->base.base,
5958 				adev->mode_info.underscan_vborder_property,
5959 				0);
5960 
5961 	if (!aconnector->mst_port)
5962 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5963 
5964 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
5965 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5966 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5967 
5968 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5969 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5970 		drm_object_attach_property(&aconnector->base.base,
5971 				adev->mode_info.abm_level_property, 0);
5972 	}
5973 
5974 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5975 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5976 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5977 		drm_object_attach_property(
5978 			&aconnector->base.base,
5979 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5980 
5981 		if (!aconnector->mst_port)
5982 			drm_connector_attach_vrr_capable_property(&aconnector->base);
5983 
5984 #ifdef CONFIG_DRM_AMD_DC_HDCP
5985 		if (adev->dm.hdcp_workqueue)
5986 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5987 #endif
5988 	}
5989 }
5990 
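/*
 * i2c_algorithm .master_xfer hook: translate the i2c_msg array into a
 * DC i2c_command and submit it over the link's DDC channel. Returns
 * the number of messages transferred on success, -EIO otherwise.
 */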
5991 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5992 			      struct i2c_msg *msgs, int num)
5993 {
5994 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5995 	struct ddc_service *ddc_service = i2c->ddc_service;
5996 	struct i2c_command cmd;
5997 	int i;
5998 	int result = -EIO;
5999 
6000 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6001 
6002 	if (!cmd.payloads)
6003 		return result;
6004 
6005 	cmd.number_of_payloads = num;
6006 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6007 	cmd.speed = 100;
6008 
6009 	for (i = 0; i < num; i++) {
6010 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6011 		cmd.payloads[i].address = msgs[i].addr;
6012 		cmd.payloads[i].length = msgs[i].len;
6013 		cmd.payloads[i].data = msgs[i].buf;
6014 	}
6015 
6016 	if (dc_submit_i2c(
6017 			ddc_service->ctx->dc,
6018 			ddc_service->ddc_pin->hw_info.ddc_channel,
6019 			&cmd))
6020 		result = num;
6021 
6022 	kfree(cmd.payloads);
6023 	return result;
6024 }
6025 
6026 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6027 {
6028 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6029 }
6030 
6031 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6032 	.master_xfer = amdgpu_dm_i2c_xfer,
6033 	.functionality = amdgpu_dm_i2c_func,
6034 };
6035 
6036 static struct amdgpu_i2c_adapter *
6037 create_i2c(struct ddc_service *ddc_service,
6038 	   int link_index,
6039 	   int *res)
6040 {
6041 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6042 	struct amdgpu_i2c_adapter *i2c;
6043 
6044 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6045 	if (!i2c)
6046 		return NULL;
6047 	i2c->base.owner = THIS_MODULE;
6048 	i2c->base.class = I2C_CLASS_DDC;
6049 	i2c->base.dev.parent = &adev->pdev->dev;
6050 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6051 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6052 	i2c_set_adapdata(&i2c->base, i2c);
6053 	i2c->ddc_service = ddc_service;
6054 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6055 
6056 	return i2c;
6057 }
6058 
6059 
6060 /*
6061  * Note: this function assumes that dc_link_detect() was called for the
6062  * dc_link which will be represented by this aconnector.
6063  */
6064 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6065 				    struct amdgpu_dm_connector *aconnector,
6066 				    uint32_t link_index,
6067 				    struct amdgpu_encoder *aencoder)
6068 {
6069 	int res = 0;
6070 	int connector_type;
6071 	struct dc *dc = dm->dc;
6072 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6073 	struct amdgpu_i2c_adapter *i2c;
6074 
6075 	link->priv = aconnector;
6076 
6077 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6078 
6079 	i2c = create_i2c(link->ddc, link->link_index, &res);
6080 	if (!i2c) {
6081 		DRM_ERROR("Failed to create i2c adapter data\n");
6082 		return -ENOMEM;
6083 	}
6084 
6085 	aconnector->i2c = i2c;
6086 	res = i2c_add_adapter(&i2c->base);
6087 
6088 	if (res) {
6089 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6090 		goto out_free;
6091 	}
6092 
6093 	connector_type = to_drm_connector_type(link->connector_signal);
6094 
6095 	res = drm_connector_init_with_ddc(
6096 			dm->ddev,
6097 			&aconnector->base,
6098 			&amdgpu_dm_connector_funcs,
6099 			connector_type,
6100 			&i2c->base);
6101 
6102 	if (res) {
6103 		DRM_ERROR("connector_init failed\n");
6104 		aconnector->connector_id = -1;
6105 		goto out_free;
6106 	}
6107 
6108 	drm_connector_helper_add(
6109 			&aconnector->base,
6110 			&amdgpu_dm_connector_helper_funcs);
6111 
6112 	amdgpu_dm_connector_init_helper(
6113 		dm,
6114 		aconnector,
6115 		connector_type,
6116 		link,
6117 		link_index);
6118 
6119 	drm_connector_attach_encoder(
6120 		&aconnector->base, &aencoder->base);
6121 
6122 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6123 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6124 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6125 
6126 out_free:
6127 	if (res) {
6128 		kfree(i2c);
6129 		aconnector->i2c = NULL;
6130 	}
6131 	return res;
6132 }
6133 
6134 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6135 {
6136 	switch (adev->mode_info.num_crtc) {
6137 	case 1:
6138 		return 0x1;
6139 	case 2:
6140 		return 0x3;
6141 	case 3:
6142 		return 0x7;
6143 	case 4:
6144 		return 0xf;
6145 	case 5:
6146 		return 0x1f;
6147 	case 6:
6148 	default:
6149 		return 0x3f;
6150 	}
6151 }
6152 
6153 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6154 				  struct amdgpu_encoder *aencoder,
6155 				  uint32_t link_index)
6156 {
6157 	struct amdgpu_device *adev = dev->dev_private;
6158 
6159 	int res = drm_encoder_init(dev,
6160 				   &aencoder->base,
6161 				   &amdgpu_dm_encoder_funcs,
6162 				   DRM_MODE_ENCODER_TMDS,
6163 				   NULL);
6164 
6165 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6166 
6167 	if (!res)
6168 		aencoder->encoder_id = link_index;
6169 	else
6170 		aencoder->encoder_id = -1;
6171 
6172 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6173 
6174 	return res;
6175 }
6176 
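/*
 * Enable or disable vblank and pageflip interrupt handling for a CRTC.
 * Enable turns vblank on before taking a pageflip irq reference;
 * disable releases them in the reverse order.
 */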
6177 static void manage_dm_interrupts(struct amdgpu_device *adev,
6178 				 struct amdgpu_crtc *acrtc,
6179 				 bool enable)
6180 {
6181 	/*
	 * This is not the correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
6184 	 */
6185 	int irq_type =
6186 		amdgpu_display_crtc_idx_to_irq_type(
6187 			adev,
6188 			acrtc->crtc_id);
6189 
6190 	if (enable) {
6191 		drm_crtc_vblank_on(&acrtc->base);
6192 		amdgpu_irq_get(
6193 			adev,
6194 			&adev->pageflip_irq,
6195 			irq_type);
6196 	} else {
6198 		amdgpu_irq_put(
6199 			adev,
6200 			&adev->pageflip_irq,
6201 			irq_type);
6202 		drm_crtc_vblank_off(&acrtc->base);
6203 	}
6204 }
6205 
6206 static bool
6207 is_scaling_state_different(const struct dm_connector_state *dm_state,
6208 			   const struct dm_connector_state *old_dm_state)
6209 {
6210 	if (dm_state->scaling != old_dm_state->scaling)
6211 		return true;
6212 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6213 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6214 			return true;
6215 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6216 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6217 			return true;
6218 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6219 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6220 		return true;
6221 	return false;
6222 }
6223 
6224 #ifdef CONFIG_DRM_AMD_DC_HDCP
6225 static bool is_content_protection_different(struct drm_connector_state *state,
6226 					    const struct drm_connector_state *old_state,
6227 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6228 {
6229 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6230 
6231 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6232 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6233 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6234 		return true;
6235 	}
6236 
	/* CP is being re-enabled, ignore this */
6238 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6239 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6240 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6241 		return false;
6242 	}
6243 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6245 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6246 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6247 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6248 
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 */
6252 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6253 	    aconnector->dc_sink != NULL)
6254 		return true;
6255 
6256 	if (old_state->content_protection == state->content_protection)
6257 		return false;
6258 
6259 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6260 		return true;
6261 
6262 	return false;
6263 }
6264 
6265 #endif
6266 static void remove_stream(struct amdgpu_device *adev,
6267 			  struct amdgpu_crtc *acrtc,
6268 			  struct dc_stream_state *stream)
6269 {
	/* This is the update-mode case: the stream is being removed, so clear
	 * the CRTC's OTG instance and mark it disabled.
	 */
6271 
6272 	acrtc->otg_inst = -1;
6273 	acrtc->enabled = false;
6274 }
6275 
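/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * on-screen coordinates are clamped to zero and compensated for via
 * the cursor hotspot.
 */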
6276 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6277 			       struct dc_cursor_position *position)
6278 {
6279 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6280 	int x, y;
6281 	int xorigin = 0, yorigin = 0;
6282 
6283 	position->enable = false;
6284 	position->x = 0;
6285 	position->y = 0;
6286 
6287 	if (!crtc || !plane->state->fb)
6288 		return 0;
6289 
6290 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6291 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6292 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6293 			  __func__,
6294 			  plane->state->crtc_w,
6295 			  plane->state->crtc_h);
6296 		return -EINVAL;
6297 	}
6298 
6299 	x = plane->state->crtc_x;
6300 	y = plane->state->crtc_y;
6301 
6302 	if (x <= -amdgpu_crtc->max_cursor_width ||
6303 	    y <= -amdgpu_crtc->max_cursor_height)
6304 		return 0;
6305 
6306 	if (x < 0) {
6307 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6308 		x = 0;
6309 	}
6310 	if (y < 0) {
6311 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6312 		y = 0;
6313 	}
6314 	position->enable = true;
6315 	position->translate_by_source = true;
6316 	position->x = x;
6317 	position->y = y;
6318 	position->x_hotspot = xorigin;
6319 	position->y_hotspot = yorigin;
6320 
6321 	return 0;
6322 }
6323 
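/*
 * Program the DC cursor attributes and position for the plane's CRTC,
 * or disable the cursor when it is off-screen or no FB is bound.
 */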
6324 static void handle_cursor_update(struct drm_plane *plane,
6325 				 struct drm_plane_state *old_plane_state)
6326 {
6327 	struct amdgpu_device *adev = plane->dev->dev_private;
6328 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6329 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6330 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6331 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6332 	uint64_t address = afb ? afb->address : 0;
6333 	struct dc_cursor_position position;
6334 	struct dc_cursor_attributes attributes;
6335 	int ret;
6336 
6337 	if (!plane->state->fb && !old_plane_state->fb)
6338 		return;
6339 
	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d x %d\n",
6341 			 __func__,
6342 			 amdgpu_crtc->crtc_id,
6343 			 plane->state->crtc_w,
6344 			 plane->state->crtc_h);
6345 
6346 	ret = get_cursor_position(plane, crtc, &position);
6347 	if (ret)
6348 		return;
6349 
6350 	if (!position.enable) {
6351 		/* turn off cursor */
6352 		if (crtc_state && crtc_state->stream) {
6353 			mutex_lock(&adev->dm.dc_lock);
6354 			dc_stream_set_cursor_position(crtc_state->stream,
6355 						      &position);
6356 			mutex_unlock(&adev->dm.dc_lock);
6357 		}
6358 		return;
6359 	}
6360 
6361 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6362 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6363 
6364 	memset(&attributes, 0, sizeof(attributes));
6365 	attributes.address.high_part = upper_32_bits(address);
6366 	attributes.address.low_part  = lower_32_bits(address);
6367 	attributes.width             = plane->state->crtc_w;
6368 	attributes.height            = plane->state->crtc_h;
6369 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6370 	attributes.rotation_angle    = 0;
6371 	attributes.attribute_flags.value = 0;
6372 
6373 	attributes.pitch = attributes.width;
6374 
6375 	if (crtc_state->stream) {
6376 		mutex_lock(&adev->dm.dc_lock);
6377 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6378 							 &attributes))
6379 			DRM_ERROR("DC failed to set cursor attributes\n");
6380 
6381 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6382 						   &position))
6383 			DRM_ERROR("DC failed to set cursor position\n");
6384 		mutex_unlock(&adev->dm.dc_lock);
6385 	}
6386 }
6387 
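/*
 * Hand the pending pageflip event from the CRTC state over to the
 * amdgpu_crtc and mark the flip as submitted. Must be called with the
 * event_lock held.
 */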
6388 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6389 {
6391 	assert_spin_locked(&acrtc->base.dev->event_lock);
6392 	WARN_ON(acrtc->event);
6393 
6394 	acrtc->event = acrtc->base.state->event;
6395 
6396 	/* Set the flip status */
6397 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6398 
6399 	/* Mark this event as consumed */
6400 	acrtc->base.state->event = NULL;
6401 
6402 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6403 						 acrtc->crtc_id);
6404 }
6405 
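/*
 * Recompute the VRR parameters and infopacket for the stream on flip,
 * record whether the timing or infopacket changed, and, on pre-AI
 * ASICs with VRR active, push the adjusted vmin/vmax to DC before the
 * frame ends.
 */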
6406 static void update_freesync_state_on_stream(
6407 	struct amdgpu_display_manager *dm,
6408 	struct dm_crtc_state *new_crtc_state,
6409 	struct dc_stream_state *new_stream,
6410 	struct dc_plane_state *surface,
6411 	u32 flip_timestamp_in_us)
6412 {
6413 	struct mod_vrr_params vrr_params;
6414 	struct dc_info_packet vrr_infopacket = {0};
6415 	struct amdgpu_device *adev = dm->adev;
6416 	unsigned long flags;
6417 
6418 	if (!new_stream)
6419 		return;
6420 
6421 	/*
6422 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6423 	 * For now it's sufficient to just guard against these conditions.
6424 	 */
6425 
6426 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6427 		return;
6428 
6429 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6430 	vrr_params = new_crtc_state->vrr_params;
6431 
6432 	if (surface) {
6433 		mod_freesync_handle_preflip(
6434 			dm->freesync_module,
6435 			surface,
6436 			new_stream,
6437 			flip_timestamp_in_us,
6438 			&vrr_params);
6439 
6440 		if (adev->family < AMDGPU_FAMILY_AI &&
6441 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6442 			mod_freesync_handle_v_update(dm->freesync_module,
6443 						     new_stream, &vrr_params);
6444 
6445 			/* Need to call this before the frame ends. */
6446 			dc_stream_adjust_vmin_vmax(dm->dc,
6447 						   new_crtc_state->stream,
6448 						   &vrr_params.adjust);
6449 		}
6450 	}
6451 
6452 	mod_freesync_build_vrr_infopacket(
6453 		dm->freesync_module,
6454 		new_stream,
6455 		&vrr_params,
6456 		PACKET_TYPE_VRR,
6457 		TRANSFER_FUNC_UNKNOWN,
6458 		&vrr_infopacket);
6459 
6460 	new_crtc_state->freesync_timing_changed |=
6461 		(memcmp(&new_crtc_state->vrr_params.adjust,
6462 			&vrr_params.adjust,
6463 			sizeof(vrr_params.adjust)) != 0);
6464 
6465 	new_crtc_state->freesync_vrr_info_changed |=
6466 		(memcmp(&new_crtc_state->vrr_infopacket,
6467 			&vrr_infopacket,
6468 			sizeof(vrr_infopacket)) != 0);
6469 
6470 	new_crtc_state->vrr_params = vrr_params;
6471 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6472 
6473 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6474 	new_stream->vrr_infopacket = vrr_infopacket;
6475 
6476 	if (new_crtc_state->freesync_vrr_info_changed)
6477 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6478 			      new_crtc_state->base.crtc->base.id,
6479 			      (int)new_crtc_state->base.vrr_enabled,
6480 			      (int)vrr_params.state);
6481 
6482 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6483 }
6484 
6485 static void pre_update_freesync_state_on_stream(
6486 	struct amdgpu_display_manager *dm,
6487 	struct dm_crtc_state *new_crtc_state)
6488 {
6489 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6490 	struct mod_vrr_params vrr_params;
6491 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6492 	struct amdgpu_device *adev = dm->adev;
6493 	unsigned long flags;
6494 
6495 	if (!new_stream)
6496 		return;
6497 
6498 	/*
6499 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6500 	 * For now it's sufficient to just guard against these conditions.
6501 	 */
6502 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6503 		return;
6504 
6505 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6506 	vrr_params = new_crtc_state->vrr_params;
6507 
6508 	if (new_crtc_state->vrr_supported &&
6509 	    config.min_refresh_in_uhz &&
6510 	    config.max_refresh_in_uhz) {
6511 		config.state = new_crtc_state->base.vrr_enabled ?
6512 			VRR_STATE_ACTIVE_VARIABLE :
6513 			VRR_STATE_INACTIVE;
6514 	} else {
6515 		config.state = VRR_STATE_UNSUPPORTED;
6516 	}
6517 
6518 	mod_freesync_build_vrr_params(dm->freesync_module,
6519 				      new_stream,
6520 				      &config, &vrr_params);
6521 
6522 	new_crtc_state->freesync_timing_changed |=
6523 		(memcmp(&new_crtc_state->vrr_params.adjust,
6524 			&vrr_params.adjust,
6525 			sizeof(vrr_params.adjust)) != 0);
6526 
6527 	new_crtc_state->vrr_params = vrr_params;
6528 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6529 }
6530 
6531 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6532 					    struct dm_crtc_state *new_state)
6533 {
6534 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6535 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6536 
6537 	if (!old_vrr_active && new_vrr_active) {
6538 		/* Transition VRR inactive -> active:
6539 		 * While VRR is active, we must not disable vblank irq, as a
6540 		 * reenable after disable would compute bogus vblank/pflip
6541 		 * timestamps if it likely happened inside display front-porch.
6542 		 *
6543 		 * We also need vupdate irq for the actual core vblank handling
6544 		 * at end of vblank.
6545 		 */
6546 		dm_set_vupdate_irq(new_state->base.crtc, true);
6547 		drm_crtc_vblank_get(new_state->base.crtc);
6548 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6549 				 __func__, new_state->base.crtc->base.id);
6550 	} else if (old_vrr_active && !new_vrr_active) {
6551 		/* Transition VRR active -> inactive:
6552 		 * Allow vblank irq disable again for fixed refresh rate.
6553 		 */
6554 		dm_set_vupdate_irq(new_state->base.crtc, false);
6555 		drm_crtc_vblank_put(new_state->base.crtc);
6556 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6557 				 __func__, new_state->base.crtc->base.id);
6558 	}
6559 }
6560 
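/* Issue cursor updates for every cursor plane in the atomic state. */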
6561 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6562 {
6563 	struct drm_plane *plane;
6564 	struct drm_plane_state *old_plane_state, *new_plane_state;
6565 	int i;
6566 
6567 	/*
6568 	 * TODO: Make this per-stream so we don't issue redundant updates for
6569 	 * commits with multiple streams.
6570 	 */
6571 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6572 				       new_plane_state, i)
6573 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6574 			handle_cursor_update(plane, old_plane_state);
6575 }
6576 
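/*
 * Build a dc_surface_update bundle for all planes on @pcrtc that
 * changed in this commit, throttle page flips against the target
 * vblank, and commit the plane and stream updates to DC under dc_lock.
 */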
6577 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6578 				    struct dc_state *dc_state,
6579 				    struct drm_device *dev,
6580 				    struct amdgpu_display_manager *dm,
6581 				    struct drm_crtc *pcrtc,
6582 				    bool wait_for_vblank)
6583 {
6584 	uint32_t i;
6585 	uint64_t timestamp_ns;
6586 	struct drm_plane *plane;
6587 	struct drm_plane_state *old_plane_state, *new_plane_state;
6588 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6589 	struct drm_crtc_state *new_pcrtc_state =
6590 			drm_atomic_get_new_crtc_state(state, pcrtc);
6591 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6592 	struct dm_crtc_state *dm_old_crtc_state =
6593 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6594 	int planes_count = 0, vpos, hpos;
6595 	long r;
6596 	unsigned long flags;
6597 	struct amdgpu_bo *abo;
6598 	uint64_t tiling_flags;
6599 	uint32_t target_vblank, last_flip_vblank;
6600 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6601 	bool pflip_present = false;
6602 	struct {
6603 		struct dc_surface_update surface_updates[MAX_SURFACES];
6604 		struct dc_plane_info plane_infos[MAX_SURFACES];
6605 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6606 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6607 		struct dc_stream_update stream_update;
6608 	} *bundle;
6609 
6610 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6611 
6612 	if (!bundle) {
6613 		dm_error("Failed to allocate update bundle\n");
6614 		goto cleanup;
6615 	}
6616 
6617 	/*
6618 	 * Disable the cursor first if we're disabling all the planes.
6619 	 * It'll remain on the screen after the planes are re-enabled
6620 	 * if we don't.
6621 	 */
6622 	if (acrtc_state->active_planes == 0)
6623 		amdgpu_dm_commit_cursors(state);
6624 
6625 	/* update planes when needed */
6626 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6627 		struct drm_crtc *crtc = new_plane_state->crtc;
6628 		struct drm_crtc_state *new_crtc_state;
6629 		struct drm_framebuffer *fb = new_plane_state->fb;
6630 		bool plane_needs_flip;
6631 		struct dc_plane_state *dc_plane;
6632 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6633 
6634 		/* Cursor plane is handled after stream updates */
6635 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6636 			continue;
6637 
6638 		if (!fb || !crtc || pcrtc != crtc)
6639 			continue;
6640 
6641 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6642 		if (!new_crtc_state->active)
6643 			continue;
6644 
6645 		dc_plane = dm_new_plane_state->dc_state;
6646 
6647 		bundle->surface_updates[planes_count].surface = dc_plane;
6648 		if (new_pcrtc_state->color_mgmt_changed) {
6649 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6650 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6651 		}
6652 
6653 		fill_dc_scaling_info(new_plane_state,
6654 				     &bundle->scaling_infos[planes_count]);
6655 
6656 		bundle->surface_updates[planes_count].scaling_info =
6657 			&bundle->scaling_infos[planes_count];
6658 
6659 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6660 
6661 		pflip_present = pflip_present || plane_needs_flip;
6662 
6663 		if (!plane_needs_flip) {
6664 			planes_count += 1;
6665 			continue;
6666 		}
6667 
6668 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6669 
6670 		/*
6671 		 * Wait for all fences on this FB. Do limited wait to avoid
6672 		 * deadlock during GPU reset when this fence will not signal
6673 		 * but we hold reservation lock for the BO.
6674 		 */
6675 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6676 							false,
6677 							msecs_to_jiffies(5000));
6678 		if (unlikely(r <= 0))
6679 			DRM_ERROR("Waiting for fences timed out!");
6680 
		/*
		 * TODO: This might fail and hence is better not used; wait
		 * explicitly on fences instead, and in general this should be
		 * called for blocking commits as per the framework helpers.
		 */
6687 		r = amdgpu_bo_reserve(abo, true);
6688 		if (unlikely(r != 0))
6689 			DRM_ERROR("failed to reserve buffer before flip\n");
6690 
6691 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6692 
6693 		amdgpu_bo_unreserve(abo);
6694 
6695 		fill_dc_plane_info_and_addr(
6696 			dm->adev, new_plane_state, tiling_flags,
6697 			&bundle->plane_infos[planes_count],
6698 			&bundle->flip_addrs[planes_count].address,
6699 			false);
6700 
6701 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6702 				 new_plane_state->plane->index,
6703 				 bundle->plane_infos[planes_count].dcc.enable);
6704 
6705 		bundle->surface_updates[planes_count].plane_info =
6706 			&bundle->plane_infos[planes_count];
6707 
6708 		/*
6709 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
6711 		 */
6712 		bundle->flip_addrs[planes_count].flip_immediate =
6713 			crtc->state->async_flip &&
6714 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6715 
6716 		timestamp_ns = ktime_get_ns();
6717 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6718 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6719 		bundle->surface_updates[planes_count].surface = dc_plane;
6720 
6721 		if (!bundle->surface_updates[planes_count].surface) {
6722 			DRM_ERROR("No surface for CRTC: id=%d\n",
6723 					acrtc_attach->crtc_id);
6724 			continue;
6725 		}
6726 
6727 		if (plane == pcrtc->primary)
6728 			update_freesync_state_on_stream(
6729 				dm,
6730 				acrtc_state,
6731 				acrtc_state->stream,
6732 				dc_plane,
6733 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6734 
6735 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6736 				 __func__,
6737 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6738 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6739 
6740 		planes_count += 1;
6741 
6742 	}
6743 
6744 	if (pflip_present) {
6745 		if (!vrr_active) {
6746 			/* Use old throttling in non-vrr fixed refresh rate mode
6747 			 * to keep flip scheduling based on target vblank counts
6748 			 * working in a backwards compatible way, e.g., for
6749 			 * clients using the GLX_OML_sync_control extension or
6750 			 * DRI3/Present extension with defined target_msc.
6751 			 */
6752 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
6755 			/* For variable refresh rate mode only:
6756 			 * Get vblank of last completed flip to avoid > 1 vrr
6757 			 * flips per video frame by use of throttling, but allow
6758 			 * flip programming anywhere in the possibly large
6759 			 * variable vrr vblank interval for fine-grained flip
6760 			 * timing control and more opportunity to avoid stutter
6761 			 * on late submission of flips.
6762 			 */
6763 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6764 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6765 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6766 		}
6767 
6768 		target_vblank = last_flip_vblank + wait_for_vblank;
6769 
6770 		/*
6771 		 * Wait until we're out of the vertical blank period before the one
6772 		 * targeted by the flip
6773 		 */
6774 		while ((acrtc_attach->enabled &&
6775 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6776 							    0, &vpos, &hpos, NULL,
6777 							    NULL, &pcrtc->hwmode)
6778 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6779 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6780 			(int)(target_vblank -
6781 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6782 			usleep_range(1000, 1100);
6783 		}
6784 
6785 		if (acrtc_attach->base.state->event) {
6786 			drm_crtc_vblank_get(pcrtc);
6787 
6788 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6789 
6790 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6791 			prepare_flip_isr(acrtc_attach);
6792 
6793 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6794 		}
6795 
6796 		if (acrtc_state->stream) {
6797 			if (acrtc_state->freesync_vrr_info_changed)
6798 				bundle->stream_update.vrr_infopacket =
6799 					&acrtc_state->stream->vrr_infopacket;
6800 		}
6801 	}
6802 
6803 	/* Update the planes if changed or disable if we don't have any. */
6804 	if ((planes_count || acrtc_state->active_planes == 0) &&
6805 		acrtc_state->stream) {
6806 		bundle->stream_update.stream = acrtc_state->stream;
6807 		if (new_pcrtc_state->mode_changed) {
6808 			bundle->stream_update.src = acrtc_state->stream->src;
6809 			bundle->stream_update.dst = acrtc_state->stream->dst;
6810 		}
6811 
6812 		if (new_pcrtc_state->color_mgmt_changed) {
6813 			/*
6814 			 * TODO: This isn't fully correct since we've actually
6815 			 * already modified the stream in place.
6816 			 */
6817 			bundle->stream_update.gamut_remap =
6818 				&acrtc_state->stream->gamut_remap_matrix;
6819 			bundle->stream_update.output_csc_transform =
6820 				&acrtc_state->stream->csc_color_matrix;
6821 			bundle->stream_update.out_transfer_func =
6822 				acrtc_state->stream->out_transfer_func;
6823 		}
6824 
6825 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6826 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6827 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6828 
6829 		/*
6830 		 * If FreeSync state on the stream has changed then we need to
6831 		 * re-adjust the min/max bounds now that DC doesn't handle this
6832 		 * as part of commit.
6833 		 */
6834 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6835 		    amdgpu_dm_vrr_active(acrtc_state)) {
6836 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6837 			dc_stream_adjust_vmin_vmax(
6838 				dm->dc, acrtc_state->stream,
6839 				&acrtc_state->vrr_params.adjust);
6840 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6841 		}
6842 		mutex_lock(&dm->dc_lock);
6843 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6844 				acrtc_state->stream->link->psr_allow_active)
6845 			amdgpu_dm_psr_disable(acrtc_state->stream);
6846 
6847 		dc_commit_updates_for_stream(dm->dc,
6848 						     bundle->surface_updates,
6849 						     planes_count,
6850 						     acrtc_state->stream,
6851 						     &bundle->stream_update,
6852 						     dc_state);
6853 
6854 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6855 						acrtc_state->stream->psr_version &&
6856 						!acrtc_state->stream->link->psr_feature_enabled)
6857 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6858 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6859 						acrtc_state->stream->link->psr_feature_enabled &&
6860 						!acrtc_state->stream->link->psr_allow_active) {
6861 			amdgpu_dm_psr_enable(acrtc_state->stream);
6862 		}
6863 
6864 		mutex_unlock(&dm->dc_lock);
6865 	}
6866 
6867 	/*
6868 	 * Update cursor state *after* programming all the planes.
6869 	 * This avoids redundant programming in the case where we're going
6870 	 * to be disabling a single plane - those pipes are being disabled.
6871 	 */
6872 	if (acrtc_state->active_planes)
6873 		amdgpu_dm_commit_cursors(state);
6874 
6875 cleanup:
6876 	kfree(bundle);
6877 }
6878 
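/*
 * Notify the audio component about ELD changes: first signal removals
 * for connectors whose CRTC changed or was disabled, then additions
 * for streams that became active through a modeset.
 */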
6879 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6880 				   struct drm_atomic_state *state)
6881 {
6882 	struct amdgpu_device *adev = dev->dev_private;
6883 	struct amdgpu_dm_connector *aconnector;
6884 	struct drm_connector *connector;
6885 	struct drm_connector_state *old_con_state, *new_con_state;
6886 	struct drm_crtc_state *new_crtc_state;
6887 	struct dm_crtc_state *new_dm_crtc_state;
6888 	const struct dc_stream_status *status;
6889 	int i, inst;
6890 
6891 	/* Notify device removals. */
6892 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6893 		if (old_con_state->crtc != new_con_state->crtc) {
6894 			/* CRTC changes require notification. */
6895 			goto notify;
6896 		}
6897 
6898 		if (!new_con_state->crtc)
6899 			continue;
6900 
6901 		new_crtc_state = drm_atomic_get_new_crtc_state(
6902 			state, new_con_state->crtc);
6903 
6904 		if (!new_crtc_state)
6905 			continue;
6906 
6907 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6908 			continue;
6909 
6910 	notify:
6911 		aconnector = to_amdgpu_dm_connector(connector);
6912 
6913 		mutex_lock(&adev->dm.audio_lock);
6914 		inst = aconnector->audio_inst;
6915 		aconnector->audio_inst = -1;
6916 		mutex_unlock(&adev->dm.audio_lock);
6917 
6918 		amdgpu_dm_audio_eld_notify(adev, inst);
6919 	}
6920 
6921 	/* Notify audio device additions. */
6922 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6923 		if (!new_con_state->crtc)
6924 			continue;
6925 
6926 		new_crtc_state = drm_atomic_get_new_crtc_state(
6927 			state, new_con_state->crtc);
6928 
6929 		if (!new_crtc_state)
6930 			continue;
6931 
6932 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6933 			continue;
6934 
6935 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6936 		if (!new_dm_crtc_state->stream)
6937 			continue;
6938 
6939 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6940 		if (!status)
6941 			continue;
6942 
6943 		aconnector = to_amdgpu_dm_connector(connector);
6944 
6945 		mutex_lock(&adev->dm.audio_lock);
6946 		inst = status->audio_inst;
6947 		aconnector->audio_inst = inst;
6948 		mutex_unlock(&adev->dm.audio_lock);
6949 
6950 		amdgpu_dm_audio_eld_notify(adev, inst);
6951 	}
6952 }
6953 
6954 /*
6955  * Enable interrupts on CRTCs that are newly active, have undergone
6956  * a modeset, or have active planes again.
6957  *
6958  * Done in two passes, based on the for_modeset flag:
6959  * Pass 1: For CRTCs going through modeset
6960  * Pass 2: For CRTCs going from 0 to n active planes
6961  *
6962  * Interrupts can only be enabled after the planes are programmed,
6963  * so this requires a two-pass approach since we don't want to
6964  * just defer the interrupts until after commit planes every time.
6965  */
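/*
 * Illustrative reduction of the pass-selection predicate used below
 * (a readability sketch, not driver code):
 *
 *   run_pass = for_modeset ? modeset
 *                          : !modeset && !old_state->interrupts_enabled;
 *
 * Pass 1 therefore only touches CRTCs undergoing a modeset, while
 * pass 2 only touches non-modeset CRTCs whose interrupts were
 * previously disabled.
 */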
6966 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6967 					     struct drm_atomic_state *state,
6968 					     bool for_modeset)
6969 {
6970 	struct amdgpu_device *adev = dev->dev_private;
6971 	struct drm_crtc *crtc;
6972 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6973 	int i;
6974 #ifdef CONFIG_DEBUG_FS
6975 	enum amdgpu_dm_pipe_crc_source source;
6976 #endif
6977 
6978 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6979 				      new_crtc_state, i) {
6980 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6981 		struct dm_crtc_state *dm_new_crtc_state =
6982 			to_dm_crtc_state(new_crtc_state);
6983 		struct dm_crtc_state *dm_old_crtc_state =
6984 			to_dm_crtc_state(old_crtc_state);
6985 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6986 		bool run_pass;
6987 
6988 		run_pass = (for_modeset && modeset) ||
6989 			   (!for_modeset && !modeset &&
6990 			    !dm_old_crtc_state->interrupts_enabled);
6991 
6992 		if (!run_pass)
6993 			continue;
6994 
6995 		if (!dm_new_crtc_state->interrupts_enabled)
6996 			continue;
6997 
6998 		manage_dm_interrupts(adev, acrtc, true);
6999 
7000 #ifdef CONFIG_DEBUG_FS
7001 		/* The stream has changed so CRC capture needs to be re-enabled. */
7002 		source = dm_new_crtc_state->crc_src;
7003 		if (amdgpu_dm_is_valid_crc_source(source)) {
7004 			amdgpu_dm_crtc_configure_crc_source(
7005 				crtc, dm_new_crtc_state,
7006 				dm_new_crtc_state->crc_src);
7007 		}
7008 #endif
7009 	}
7010 }
7011 
7012 /*
7013  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7014  * @crtc_state: the DRM CRTC state
7015  * @stream_state: the DC stream state.
7016  *
7017  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7018  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7019  */
7020 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7021 						struct dc_stream_state *stream_state)
7022 {
7023 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7024 }
7025 
7026 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7027 				   struct drm_atomic_state *state,
7028 				   bool nonblock)
7029 {
7030 	struct drm_crtc *crtc;
7031 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7032 	struct amdgpu_device *adev = dev->dev_private;
7033 	int i;
7034 
7035 	/*
7036 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7037 	 * a modeset, being disabled, or have no active planes.
7038 	 *
7039 	 * It's done in atomic commit rather than commit tail for now since
7040 	 * some of these interrupt handlers access the current CRTC state and
7041 	 * potentially the stream pointer itself.
7042 	 *
7043 	 * Since the atomic state is swapped within atomic commit and not within
7044 	 * commit tail, this would lead to the new state (that hasn't been
7045 	 * committed yet) being accessed from within the handlers.
7046 	 *
7047 	 * TODO: Fix this so we can do this in commit tail and not have to block
7048 	 * in atomic check.
7049 	 */
7050 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7051 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7052 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7053 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7054 
7055 		if (dm_old_crtc_state->interrupts_enabled &&
7056 		    (!dm_new_crtc_state->interrupts_enabled ||
7057 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7058 			manage_dm_interrupts(adev, acrtc, false);
7059 	}
7060 	/*
7061 	 * Add a check here for SoCs that support a hardware cursor plane, to
7062 	 * unset legacy_cursor_update.
7063 	 */
7064 
7065 	return drm_atomic_helper_commit(dev, state, nonblock);
7066 
7067 	/* TODO: Handle EINTR, re-enable IRQ */
7068 }
7069 
7070 /**
7071  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7072  * @state: The atomic state to commit
7073  *
7074  * This will tell DC to commit the constructed DC state from atomic_check,
7075  * programming the hardware. Any failures here imply a hardware failure, since
7076  * atomic check should have filtered anything non-kosher.
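 *
 * A rough outline of the sequence implemented below (for orientation;
 * the code is authoritative):
 *   1. commit the constructed DC state for modesets (dc_commit_state)
 *   2. program HDCP and connector-level stream updates
 *   3. enable CRTC interrupts and commit planes per CRTC
 *   4. send pending vblank events and signal hw_done/flip_done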
7077  */
7078 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7079 {
7080 	struct drm_device *dev = state->dev;
7081 	struct amdgpu_device *adev = dev->dev_private;
7082 	struct amdgpu_display_manager *dm = &adev->dm;
7083 	struct dm_atomic_state *dm_state;
7084 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7085 	uint32_t i, j;
7086 	struct drm_crtc *crtc;
7087 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7088 	unsigned long flags;
7089 	bool wait_for_vblank = true;
7090 	struct drm_connector *connector;
7091 	struct drm_connector_state *old_con_state, *new_con_state;
7092 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7093 	int crtc_disable_count = 0;
7094 
7095 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7096 
7097 	dm_state = dm_atomic_get_new_state(state);
7098 	if (dm_state && dm_state->context) {
7099 		dc_state = dm_state->context;
7100 	} else {
7101 		/* No state changes, retain current state. */
7102 		dc_state_temp = dc_create_state(dm->dc);
7103 		ASSERT(dc_state_temp);
7104 		dc_state = dc_state_temp;
7105 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7106 	}
7107 
7108 	/* update changed items */
7109 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7110 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7111 
7112 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7113 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7114 
7115 		DRM_DEBUG_DRIVER(
7116 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7117 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7118 			"connectors_changed:%d\n",
7119 			acrtc->crtc_id,
7120 			new_crtc_state->enable,
7121 			new_crtc_state->active,
7122 			new_crtc_state->planes_changed,
7123 			new_crtc_state->mode_changed,
7124 			new_crtc_state->active_changed,
7125 			new_crtc_state->connectors_changed);
7126 
7127 		/* Copy all transient state flags into dc state */
7128 		if (dm_new_crtc_state->stream) {
7129 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7130 							    dm_new_crtc_state->stream);
7131 		}
7132 
7133 		/* handles headless hotplug case, updating new_state and
7134 		 * aconnector as needed
7135 		 */
7136 
7137 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7138 
7139 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7140 
7141 			if (!dm_new_crtc_state->stream) {
7142 				/*
7143 				 * This could happen because of issues with
7144 				 * userspace notification delivery: userspace
7145 				 * tries to set a mode on a display that is in
7146 				 * fact disconnected, so dc_sink is NULL on the
7147 				 * aconnector. We expect a reset-mode request
7148 				 * to come soon.
7149 				 *
7150 				 * This can also happen when an unplug occurs
7151 				 * while the resume sequence is in progress.
7152 				 *
7153 				 * In this case, we want to pretend we still
7154 				 * have a sink to keep the pipe running so that
7155 				 * hw state is consistent with the sw state.
7156 				 */
7157 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7158 						__func__, acrtc->base.base.id);
7159 				continue;
7160 			}
7161 
7162 			if (dm_old_crtc_state->stream)
7163 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7164 
7165 			pm_runtime_get_noresume(dev->dev);
7166 
7167 			acrtc->enabled = true;
7168 			acrtc->hw_mode = new_crtc_state->mode;
7169 			crtc->hwmode = new_crtc_state->mode;
7170 		} else if (modereset_required(new_crtc_state)) {
7171 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7172 			/* i.e. reset mode */
7173 			if (dm_old_crtc_state->stream) {
7174 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7175 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7176 
7177 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7178 			}
7179 		}
7180 	} /* for_each_crtc_in_state() */
7181 
7182 	if (dc_state) {
7183 		dm_enable_per_frame_crtc_master_sync(dc_state);
7184 		mutex_lock(&dm->dc_lock);
7185 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7186 		mutex_unlock(&dm->dc_lock);
7187 	}
7188 
7189 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7190 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7191 
7192 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7193 
7194 		if (dm_new_crtc_state->stream != NULL) {
7195 			const struct dc_stream_status *status =
7196 					dc_stream_get_status(dm_new_crtc_state->stream);
7197 
7198 			if (!status)
7199 				status = dc_stream_get_status_from_state(dc_state,
7200 									 dm_new_crtc_state->stream);
7201 
7202 			if (!status)
7203 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7204 			else
7205 				acrtc->otg_inst = status->primary_otg_inst;
7206 		}
7207 	}
7208 #ifdef CONFIG_DRM_AMD_DC_HDCP
7209 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7210 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7211 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7212 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7213 
7214 		new_crtc_state = NULL;
7215 
7216 		if (acrtc)
7217 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7218 
7219 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7220 
7221 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7222 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7223 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7224 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7225 			continue;
7226 		}
7227 
7228 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7229 			hdcp_update_display(
7230 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7231 				new_con_state->hdcp_content_type,
7232 				new_con_state->content_protection ==
7233 					DRM_MODE_CONTENT_PROTECTION_DESIRED);
7234 	}
7235 #endif
7236 
7237 	/* Handle connector state changes */
7238 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7239 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7240 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7241 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7242 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7243 		struct dc_stream_update stream_update;
7244 		struct dc_info_packet hdr_packet;
7245 		struct dc_stream_status *status = NULL;
7246 		bool abm_changed, hdr_changed, scaling_changed;
7247 
7248 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7249 		memset(&stream_update, 0, sizeof(stream_update));
7250 
7251 		if (acrtc) {
7252 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7253 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7254 		}
7255 
7256 		/* Skip any modesets/resets */
7257 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7258 			continue;
7259 
7260 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7261 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7262 
7263 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7264 							     dm_old_con_state);
7265 
7266 		abm_changed = dm_new_crtc_state->abm_level !=
7267 			      dm_old_crtc_state->abm_level;
7268 
7269 		hdr_changed =
7270 			is_hdr_metadata_different(old_con_state, new_con_state);
7271 
7272 		if (!scaling_changed && !abm_changed && !hdr_changed)
7273 			continue;
7274 
7275 		stream_update.stream = dm_new_crtc_state->stream;
7276 		if (scaling_changed) {
7277 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7278 					dm_new_con_state, dm_new_crtc_state->stream);
7279 
7280 			stream_update.src = dm_new_crtc_state->stream->src;
7281 			stream_update.dst = dm_new_crtc_state->stream->dst;
7282 		}
7283 
7284 		if (abm_changed) {
7285 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7286 
7287 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7288 		}
7289 
7290 		if (hdr_changed) {
7291 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7292 			stream_update.hdr_static_metadata = &hdr_packet;
7293 		}
7294 
7295 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7296 		if (WARN_ON(!status) || WARN_ON(!status->plane_count))
7297 			continue;
7298 
7299 		/*
7300 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7301 		 * Here we create an empty update on each plane.
7302 		 * To fix this, DC should permit updating only stream properties.
7303 		 */
7304 		for (j = 0; j < status->plane_count; j++)
7305 			dummy_updates[j].surface = status->plane_states[0];
7306 
7307 
7308 		mutex_lock(&dm->dc_lock);
7309 		dc_commit_updates_for_stream(dm->dc,
7310 						     dummy_updates,
7311 						     status->plane_count,
7312 						     dm_new_crtc_state->stream,
7313 						     &stream_update,
7314 						     dc_state);
7315 		mutex_unlock(&dm->dc_lock);
7316 	}
7317 
7318 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7319 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7320 				      new_crtc_state, i) {
7321 		if (old_crtc_state->active && !new_crtc_state->active)
7322 			crtc_disable_count++;
7323 
7324 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7325 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7326 
7327 		/* Update freesync active state. */
7328 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7329 
7330 		/* Handle vrr on->off / off->on transitions */
7331 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7332 						dm_new_crtc_state);
7333 	}
7334 
7335 	/* Enable interrupts for CRTCs going through a modeset. */
7336 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7337 
7338 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7339 		if (new_crtc_state->async_flip)
7340 			wait_for_vblank = false;
7341 
7342 	/* Update planes when needed per CRTC */
7343 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7344 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7345 
7346 		if (dm_new_crtc_state->stream)
7347 			amdgpu_dm_commit_planes(state, dc_state, dev,
7348 						dm, crtc, wait_for_vblank);
7349 	}
7350 
7351 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7352 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7353 
7354 	/* Update audio instances for each connector. */
7355 	amdgpu_dm_commit_audio(dev, state);
7356 
7357 	/*
7358 	 * Send a vblank event for all events not handled in flip, and mark the
7359 	 * events consumed for drm_atomic_helper_commit_hw_done().
7360 	 */
7361 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7362 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7363 
7364 		if (new_crtc_state->event)
7365 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7366 
7367 		new_crtc_state->event = NULL;
7368 	}
7369 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7370 
7371 	/* Signal HW programming completion */
7372 	drm_atomic_helper_commit_hw_done(state);
7373 
7374 	if (wait_for_vblank)
7375 		drm_atomic_helper_wait_for_flip_done(dev, state);
7376 
7377 	drm_atomic_helper_cleanup_planes(dev, state);
7378 
7379 	/*
7380 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7381 	 * so we can put the GPU into runtime suspend if we're not driving any
7382 	 * displays anymore.
7383 	 */
7384 	for (i = 0; i < crtc_disable_count; i++)
7385 		pm_runtime_put_autosuspend(dev->dev);
7386 	pm_runtime_mark_last_busy(dev->dev);
7387 
7388 	if (dc_state_temp)
7389 		dc_release_state(dc_state_temp);
7390 }
7391 
7392 
7393 static int dm_force_atomic_commit(struct drm_connector *connector)
7394 {
7395 	int ret = 0;
7396 	struct drm_device *ddev = connector->dev;
7397 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7398 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7399 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7400 	struct drm_connector_state *conn_state;
7401 	struct drm_crtc_state *crtc_state;
7402 	struct drm_plane_state *plane_state;
7403 
7404 	if (!state)
7405 		return -ENOMEM;
7406 
7407 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7408 
7409 	/* Construct an atomic state to restore previous display settings */
7410 
7411 	/*
7412 	 * Attach connectors to drm_atomic_state
7413 	 */
7414 	conn_state = drm_atomic_get_connector_state(state, connector);
7415 
7416 	ret = PTR_ERR_OR_ZERO(conn_state);
7417 	if (ret)
7418 		goto err;
7419 
7420 	/* Attach crtc to drm_atomic_state*/
7421 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7422 
7423 	ret = PTR_ERR_OR_ZERO(crtc_state);
7424 	if (ret)
7425 		goto err;
7426 
7427 	/* force a restore */
7428 	crtc_state->mode_changed = true;
7429 
7430 	/* Attach plane to drm_atomic_state */
7431 	plane_state = drm_atomic_get_plane_state(state, plane);
7432 
7433 	ret = PTR_ERR_OR_ZERO(plane_state);
7434 	if (ret)
7435 		goto err;
7436 
7437 
7438 	/* Call commit internally with the state we just constructed */
7439 	ret = drm_atomic_commit(state);
7440 	if (!ret)
7441 		return 0;
7442 
7443 err:
7444 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7445 	drm_atomic_state_put(state);
7446 
7447 	return ret;
7448 }
7449 
7450 /*
7451  * This function handles all cases when a set mode does not come upon hotplug.
7452  * This includes when a display is unplugged then plugged back into the
7453  * same port, and when running without usermode desktop manager support.
7454  */
7455 void dm_restore_drm_connector_state(struct drm_device *dev,
7456 				    struct drm_connector *connector)
7457 {
7458 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7459 	struct amdgpu_crtc *disconnected_acrtc;
7460 	struct dm_crtc_state *acrtc_state;
7461 
7462 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7463 		return;
7464 
7465 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7466 	if (!disconnected_acrtc)
7467 		return;
7468 
7469 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7470 	if (!acrtc_state->stream)
7471 		return;
7472 
7473 	/*
7474 	 * If the previous sink is not released and different from the current,
7475 	 * we deduce we are in a state where we cannot rely on a usermode call
7476 	 * to turn on the display, so we do it here.
7477 	 */
7478 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7479 		dm_force_atomic_commit(&aconnector->base);
7480 }
7481 
7482 /*
7483  * Grabs all modesetting locks to serialize against any blocking commits,
7484  * and waits for completion of all non-blocking commits.
7485  */
7486 static int do_aquire_global_lock(struct drm_device *dev,
7487 				 struct drm_atomic_state *state)
7488 {
7489 	struct drm_crtc *crtc;
7490 	struct drm_crtc_commit *commit;
7491 	long ret;
7492 
7493 	/*
7494 	 * Adding all modeset locks to acquire_ctx will
7495 	 * ensure that when the framework releases it, the
7496 	 * extra locks we are locking here will get released too.
7497 	 */
7498 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7499 	if (ret)
7500 		return ret;
7501 
7502 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7503 		spin_lock(&crtc->commit_lock);
7504 		commit = list_first_entry_or_null(&crtc->commit_list,
7505 				struct drm_crtc_commit, commit_entry);
7506 		if (commit)
7507 			drm_crtc_commit_get(commit);
7508 		spin_unlock(&crtc->commit_lock);
7509 
7510 		if (!commit)
7511 			continue;
7512 
7513 		/*
7514 		 * Make sure all pending HW programming has completed and
7515 		 * all page flips are done.
7516 		 */
7517 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7518 
7519 		if (ret > 0)
7520 			ret = wait_for_completion_interruptible_timeout(
7521 					&commit->flip_done, 10*HZ);
7522 
7523 		if (ret == 0)
7524 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7525 				  "timed out\n", crtc->base.id, crtc->name);
7526 
7527 		drm_crtc_commit_put(commit);
7528 	}
7529 
7530 	return ret < 0 ? ret : 0;
7531 }
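
/*
 * Typical usage (a sketch mirroring the full-update path in
 * amdgpu_dm_atomic_check() below):
 *
 *   if (overall_update_type > UPDATE_TYPE_FAST) {
 *           ret = do_aquire_global_lock(dev, state);
 *           if (ret)
 *                   goto fail;
 *           // all outstanding commits have reached hw_done/flip_done here
 *   }
 */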
7532 
7533 static void get_freesync_config_for_crtc(
7534 	struct dm_crtc_state *new_crtc_state,
7535 	struct dm_connector_state *new_con_state)
7536 {
7537 	struct mod_freesync_config config = {0};
7538 	struct amdgpu_dm_connector *aconnector =
7539 			to_amdgpu_dm_connector(new_con_state->base.connector);
7540 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7541 	int vrefresh = drm_mode_vrefresh(mode);
7542 
7543 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7544 					vrefresh >= aconnector->min_vfreq &&
7545 					vrefresh <= aconnector->max_vfreq;
7546 
7547 	if (new_crtc_state->vrr_supported) {
7548 		new_crtc_state->stream->ignore_msa_timing_param = true;
7549 		config.state = new_crtc_state->base.vrr_enabled ?
7550 				VRR_STATE_ACTIVE_VARIABLE :
7551 				VRR_STATE_INACTIVE;
7552 		config.min_refresh_in_uhz =
7553 				aconnector->min_vfreq * 1000000;
7554 		config.max_refresh_in_uhz =
7555 				aconnector->max_vfreq * 1000000;
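		/*
		 * Worked example: a panel advertising a 48-120 Hz FreeSync
		 * range yields min_refresh_in_uhz = 48,000,000 and
		 * max_refresh_in_uhz = 120,000,000, since DC expects
		 * micro-Hz (1 Hz == 1,000,000 uHz).
		 */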
7556 		config.vsif_supported = true;
7557 		config.btr = true;
7558 	}
7559 
7560 	new_crtc_state->freesync_config = config;
7561 }
7562 
7563 static void reset_freesync_config_for_crtc(
7564 	struct dm_crtc_state *new_crtc_state)
7565 {
7566 	new_crtc_state->vrr_supported = false;
7567 
7568 	memset(&new_crtc_state->vrr_params, 0,
7569 	       sizeof(new_crtc_state->vrr_params));
7570 	memset(&new_crtc_state->vrr_infopacket, 0,
7571 	       sizeof(new_crtc_state->vrr_infopacket));
7572 }
7573 
7574 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7575 				struct drm_atomic_state *state,
7576 				struct drm_crtc *crtc,
7577 				struct drm_crtc_state *old_crtc_state,
7578 				struct drm_crtc_state *new_crtc_state,
7579 				bool enable,
7580 				bool *lock_and_validation_needed)
7581 {
7582 	struct dm_atomic_state *dm_state = NULL;
7583 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7584 	struct dc_stream_state *new_stream;
7585 	int ret = 0;
7586 
7587 	/*
7588 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7589 	 * dc_validation_set, and update changed items there.
7590 	 */
7591 	struct amdgpu_crtc *acrtc = NULL;
7592 	struct amdgpu_dm_connector *aconnector = NULL;
7593 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7594 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7595 
7596 	new_stream = NULL;
7597 
7598 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7599 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7600 	acrtc = to_amdgpu_crtc(crtc);
7601 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7602 
7603 	/* TODO This hack should go away */
7604 	if (aconnector && enable) {
7605 		/* Make sure fake sink is created in plug-in scenario */
7606 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7607 							    &aconnector->base);
7608 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7609 							    &aconnector->base);
7610 
7611 		if (IS_ERR(drm_new_conn_state)) {
7612 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7613 			goto fail;
7614 		}
7615 
7616 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7617 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7618 
7619 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7620 			goto skip_modeset;
7621 
7622 		new_stream = create_stream_for_sink(aconnector,
7623 						     &new_crtc_state->mode,
7624 						    dm_new_conn_state,
7625 						    dm_old_crtc_state->stream);
7626 
7627 		/*
7628 		 * We can have no stream on ACTION_SET if a display
7629 		 * was disconnected during S3; in this case it is not an
7630 		 * error, the OS will be updated after detection and
7631 		 * will do the right thing on the next atomic commit.
7632 		 */
7633 
7634 		if (!new_stream) {
7635 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7636 					__func__, acrtc->base.base.id);
7637 			ret = -ENOMEM;
7638 			goto fail;
7639 		}
7640 
7641 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7642 
7643 		ret = fill_hdr_info_packet(drm_new_conn_state,
7644 					   &new_stream->hdr_static_metadata);
7645 		if (ret)
7646 			goto fail;
7647 
7648 		/*
7649 		 * If we already removed the old stream from the context
7650 		 * (and set the new stream to NULL) then we can't reuse
7651 		 * the old stream even if the stream and scaling are unchanged.
7652 		 * We'll hit the BUG_ON and black screen.
7653 		 *
7654 		 * TODO: Refactor this function to allow this check to work
7655 		 * in all conditions.
7656 		 */
7657 		if (dm_new_crtc_state->stream &&
7658 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7659 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7660 			new_crtc_state->mode_changed = false;
7661 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7662 					 new_crtc_state->mode_changed);
7663 		}
7664 	}
7665 
7666 	/* mode_changed flag may get updated above, need to check again */
7667 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7668 		goto skip_modeset;
7669 
7670 	DRM_DEBUG_DRIVER(
7671 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7672 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7673 		"connectors_changed:%d\n",
7674 		acrtc->crtc_id,
7675 		new_crtc_state->enable,
7676 		new_crtc_state->active,
7677 		new_crtc_state->planes_changed,
7678 		new_crtc_state->mode_changed,
7679 		new_crtc_state->active_changed,
7680 		new_crtc_state->connectors_changed);
7681 
7682 	/* Remove stream for any changed/disabled CRTC */
7683 	if (!enable) {
7684 
7685 		if (!dm_old_crtc_state->stream)
7686 			goto skip_modeset;
7687 
7688 		ret = dm_atomic_get_state(state, &dm_state);
7689 		if (ret)
7690 			goto fail;
7691 
7692 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7693 				crtc->base.id);
7694 
7695 		/* i.e. reset mode */
7696 		if (dc_remove_stream_from_ctx(
7697 				dm->dc,
7698 				dm_state->context,
7699 				dm_old_crtc_state->stream) != DC_OK) {
7700 			ret = -EINVAL;
7701 			goto fail;
7702 		}
7703 
7704 		dc_stream_release(dm_old_crtc_state->stream);
7705 		dm_new_crtc_state->stream = NULL;
7706 
7707 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7708 
7709 		*lock_and_validation_needed = true;
7710 
7711 	} else { /* Add stream for any updated/enabled CRTC */
7712 		/*
7713 		 * Quick fix to prevent a NULL pointer on new_stream when newly
7714 		 * added MST connectors are not found in the existing crtc_state
7715 		 * in the chained (daisy-chain) mode. TODO: dig out the root cause.
7716 		 */
7717 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7718 			goto skip_modeset;
7719 
7720 		if (modereset_required(new_crtc_state))
7721 			goto skip_modeset;
7722 
7723 		if (modeset_required(new_crtc_state, new_stream,
7724 				     dm_old_crtc_state->stream)) {
7725 
7726 			WARN_ON(dm_new_crtc_state->stream);
7727 
7728 			ret = dm_atomic_get_state(state, &dm_state);
7729 			if (ret)
7730 				goto fail;
7731 
7732 			dm_new_crtc_state->stream = new_stream;
7733 
7734 			dc_stream_retain(new_stream);
7735 
7736 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7737 						crtc->base.id);
7738 
7739 			if (dc_add_stream_to_ctx(
7740 					dm->dc,
7741 					dm_state->context,
7742 					dm_new_crtc_state->stream) != DC_OK) {
7743 				ret = -EINVAL;
7744 				goto fail;
7745 			}
7746 
7747 			*lock_and_validation_needed = true;
7748 		}
7749 	}
7750 
7751 skip_modeset:
7752 	/* Release extra reference */
7753 	if (new_stream)
7754 		dc_stream_release(new_stream);
7755 
7756 	/*
7757 	 * We want to do dc stream updates that do not require a
7758 	 * full modeset below.
7759 	 */
7760 	if (!(enable && aconnector && new_crtc_state->enable &&
7761 	      new_crtc_state->active))
7762 		return 0;
7763 	/*
7764 	 * Given the above conditions, the dc state cannot be NULL because:
7765 	 * 1. We're in the process of enabling CRTCs (the stream has just
7766 	 *    been added to the dc context, or is already on the context),
7767 	 * 2. the CRTC has a valid connector attached, and
7768 	 * 3. the CRTC is currently active and enabled.
7769 	 * => The dc stream state currently exists.
7770 	 */
7771 	BUG_ON(dm_new_crtc_state->stream == NULL);
7772 
7773 	/* Scaling or underscan settings */
7774 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7775 		update_stream_scaling_settings(
7776 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7777 
7778 	/* ABM settings */
7779 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7780 
7781 	/*
7782 	 * Color management settings. We also update color properties
7783 	 * when a modeset is needed, to ensure they get reprogrammed.
7784 	 */
7785 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7786 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7787 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7788 		if (ret)
7789 			goto fail;
7790 	}
7791 
7792 	/* Update Freesync settings. */
7793 	get_freesync_config_for_crtc(dm_new_crtc_state,
7794 				     dm_new_conn_state);
7795 
7796 	return ret;
7797 
7798 fail:
7799 	if (new_stream)
7800 		dc_stream_release(new_stream);
7801 	return ret;
7802 }
7803 
7804 static bool should_reset_plane(struct drm_atomic_state *state,
7805 			       struct drm_plane *plane,
7806 			       struct drm_plane_state *old_plane_state,
7807 			       struct drm_plane_state *new_plane_state)
7808 {
7809 	struct drm_plane *other;
7810 	struct drm_plane_state *old_other_state, *new_other_state;
7811 	struct drm_crtc_state *new_crtc_state;
7812 	int i;
7813 
7814 	/*
7815 	 * TODO: Remove this hack once the checks below are sufficient
7816 	 * to determine when we need to reset all the planes on
7817 	 * the stream.
7818 	 */
7819 	if (state->allow_modeset)
7820 		return true;
7821 
7822 	/* Exit early if we know that we're adding or removing the plane. */
7823 	if (old_plane_state->crtc != new_plane_state->crtc)
7824 		return true;
7825 
7826 	/* old crtc == new_crtc == NULL, plane not in context. */
7827 	if (!new_plane_state->crtc)
7828 		return false;
7829 
7830 	new_crtc_state =
7831 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7832 
7833 	if (!new_crtc_state)
7834 		return true;
7835 
7836 	/* CRTC Degamma changes currently require us to recreate planes. */
7837 	if (new_crtc_state->color_mgmt_changed)
7838 		return true;
7839 
7840 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7841 		return true;
7842 
7843 	/*
7844 	 * If there are any new primary or overlay planes being added or
7845 	 * removed then the z-order can potentially change. To ensure
7846 	 * correct z-order and pipe acquisition the current DC architecture
7847 	 * requires us to remove and recreate all existing planes.
7848 	 *
7849 	 * TODO: Come up with a more elegant solution for this.
7850 	 */
7851 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7852 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7853 			continue;
7854 
7855 		if (old_other_state->crtc != new_plane_state->crtc &&
7856 		    new_other_state->crtc != new_plane_state->crtc)
7857 			continue;
7858 
7859 		if (old_other_state->crtc != new_other_state->crtc)
7860 			return true;
7861 
7862 		/* TODO: Remove this once we can handle fast format changes. */
7863 		if (old_other_state->fb && new_other_state->fb &&
7864 		    old_other_state->fb->format != new_other_state->fb->format)
7865 			return true;
7866 	}
7867 
7868 	return false;
7869 }
7870 
7871 static int dm_update_plane_state(struct dc *dc,
7872 				 struct drm_atomic_state *state,
7873 				 struct drm_plane *plane,
7874 				 struct drm_plane_state *old_plane_state,
7875 				 struct drm_plane_state *new_plane_state,
7876 				 bool enable,
7877 				 bool *lock_and_validation_needed)
7878 {
7879 
7880 	struct dm_atomic_state *dm_state = NULL;
7881 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7882 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7883 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7884 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7885 	bool needs_reset;
7886 	int ret = 0;
7887 
7888 
7889 	new_plane_crtc = new_plane_state->crtc;
7890 	old_plane_crtc = old_plane_state->crtc;
7891 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7892 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7893 
7894 	/* TODO: Implement atomic check for cursor plane */
7895 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
7896 		return 0;
7897 
7898 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7899 					 new_plane_state);
7900 
7901 	/* Remove any changed/removed planes */
7902 	if (!enable) {
7903 		if (!needs_reset)
7904 			return 0;
7905 
7906 		if (!old_plane_crtc)
7907 			return 0;
7908 
7909 		old_crtc_state = drm_atomic_get_old_crtc_state(
7910 				state, old_plane_crtc);
7911 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7912 
7913 		if (!dm_old_crtc_state->stream)
7914 			return 0;
7915 
7916 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7917 				plane->base.id, old_plane_crtc->base.id);
7918 
7919 		ret = dm_atomic_get_state(state, &dm_state);
7920 		if (ret)
7921 			return ret;
7922 
7923 		if (!dc_remove_plane_from_context(
7924 				dc,
7925 				dm_old_crtc_state->stream,
7926 				dm_old_plane_state->dc_state,
7927 				dm_state->context)) {
7928 
7929 			ret = -EINVAL;
7930 			return ret;
7931 		}
7932 
7933 
7934 		dc_plane_state_release(dm_old_plane_state->dc_state);
7935 		dm_new_plane_state->dc_state = NULL;
7936 
7937 		*lock_and_validation_needed = true;
7938 
7939 	} else { /* Add new planes */
7940 		struct dc_plane_state *dc_new_plane_state;
7941 
7942 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7943 			return 0;
7944 
7945 		if (!new_plane_crtc)
7946 			return 0;
7947 
7948 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7949 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7950 
7951 		if (!dm_new_crtc_state->stream)
7952 			return 0;
7953 
7954 		if (!needs_reset)
7955 			return 0;
7956 
7957 		WARN_ON(dm_new_plane_state->dc_state);
7958 
7959 		dc_new_plane_state = dc_create_plane_state(dc);
7960 		if (!dc_new_plane_state)
7961 			return -ENOMEM;
7962 
7963 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7964 				plane->base.id, new_plane_crtc->base.id);
7965 
7966 		ret = fill_dc_plane_attributes(
7967 			new_plane_crtc->dev->dev_private,
7968 			dc_new_plane_state,
7969 			new_plane_state,
7970 			new_crtc_state);
7971 		if (ret) {
7972 			dc_plane_state_release(dc_new_plane_state);
7973 			return ret;
7974 		}
7975 
7976 		ret = dm_atomic_get_state(state, &dm_state);
7977 		if (ret) {
7978 			dc_plane_state_release(dc_new_plane_state);
7979 			return ret;
7980 		}
7981 
7982 		/*
7983 		 * Any atomic check errors that occur after this will
7984 		 * not need a release. The plane state will be attached
7985 		 * to the stream, and therefore part of the atomic
7986 		 * state. It'll be released when the atomic state is
7987 		 * cleaned.
7988 		 */
7989 		if (!dc_add_plane_to_context(
7990 				dc,
7991 				dm_new_crtc_state->stream,
7992 				dc_new_plane_state,
7993 				dm_state->context)) {
7994 
7995 			dc_plane_state_release(dc_new_plane_state);
7996 			return -EINVAL;
7997 		}
7998 
7999 		dm_new_plane_state->dc_state = dc_new_plane_state;
8000 
8001 		/* Tell DC to do a full surface update every time there
8002 		 * is a plane change. Inefficient, but works for now.
8003 		 */
8004 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8005 
8006 		*lock_and_validation_needed = true;
8007 	}
8008 
8009 
8010 	return ret;
8011 }
8012 
8013 static int
8014 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8015 				    struct drm_atomic_state *state,
8016 				    enum surface_update_type *out_type)
8017 {
8018 	struct dc *dc = dm->dc;
8019 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8020 	int i, j, num_plane, ret = 0;
8021 	struct drm_plane_state *old_plane_state, *new_plane_state;
8022 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8023 	struct drm_crtc *new_plane_crtc;
8024 	struct drm_plane *plane;
8025 
8026 	struct drm_crtc *crtc;
8027 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8028 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8029 	struct dc_stream_status *status = NULL;
8030 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8031 	struct surface_info_bundle {
8032 		struct dc_surface_update surface_updates[MAX_SURFACES];
8033 		struct dc_plane_info plane_infos[MAX_SURFACES];
8034 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8035 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8036 		struct dc_stream_update stream_update;
8037 	} *bundle;
8038 
8039 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8040 
8041 	if (!bundle) {
8042 		DRM_ERROR("Failed to allocate update bundle\n");
8043 		/* Set type to FULL to avoid crashing in DC */
8044 		update_type = UPDATE_TYPE_FULL;
8045 		goto cleanup;
8046 	}
8047 
8048 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8049 
8050 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8051 
8052 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8053 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8054 		num_plane = 0;
8055 
8056 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8057 			update_type = UPDATE_TYPE_FULL;
8058 			goto cleanup;
8059 		}
8060 
8061 		if (!new_dm_crtc_state->stream)
8062 			continue;
8063 
8064 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8065 			const struct amdgpu_framebuffer *amdgpu_fb =
8066 				to_amdgpu_framebuffer(new_plane_state->fb);
8067 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8068 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8069 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8070 			uint64_t tiling_flags;
8071 
8072 			new_plane_crtc = new_plane_state->crtc;
8073 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8074 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8075 
8076 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8077 				continue;
8078 
8079 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8080 				update_type = UPDATE_TYPE_FULL;
8081 				goto cleanup;
8082 			}
8083 
8084 			if (crtc != new_plane_crtc)
8085 				continue;
8086 
8087 			bundle->surface_updates[num_plane].surface =
8088 					new_dm_plane_state->dc_state;
8089 
8090 			if (new_crtc_state->mode_changed) {
8091 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8092 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8093 			}
8094 
8095 			if (new_crtc_state->color_mgmt_changed) {
8096 				bundle->surface_updates[num_plane].gamma =
8097 						new_dm_plane_state->dc_state->gamma_correction;
8098 				bundle->surface_updates[num_plane].in_transfer_func =
8099 						new_dm_plane_state->dc_state->in_transfer_func;
8100 				bundle->stream_update.gamut_remap =
8101 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8102 				bundle->stream_update.output_csc_transform =
8103 						&new_dm_crtc_state->stream->csc_color_matrix;
8104 				bundle->stream_update.out_transfer_func =
8105 						new_dm_crtc_state->stream->out_transfer_func;
8106 			}
8107 
8108 			ret = fill_dc_scaling_info(new_plane_state,
8109 						   scaling_info);
8110 			if (ret)
8111 				goto cleanup;
8112 
8113 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8114 
8115 			if (amdgpu_fb) {
8116 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8117 				if (ret)
8118 					goto cleanup;
8119 
8120 				ret = fill_dc_plane_info_and_addr(
8121 					dm->adev, new_plane_state, tiling_flags,
8122 					plane_info,
8123 					&flip_addr->address,
8124 					false);
8125 				if (ret)
8126 					goto cleanup;
8127 
8128 				bundle->surface_updates[num_plane].plane_info = plane_info;
8129 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8130 			}
8131 
8132 			num_plane++;
8133 		}
8134 
8135 		if (num_plane == 0)
8136 			continue;
8137 
8138 		ret = dm_atomic_get_state(state, &dm_state);
8139 		if (ret)
8140 			goto cleanup;
8141 
8142 		old_dm_state = dm_atomic_get_old_state(state);
8143 		if (!old_dm_state) {
8144 			ret = -EINVAL;
8145 			goto cleanup;
8146 		}
8147 
8148 		status = dc_stream_get_status_from_state(old_dm_state->context,
8149 							 new_dm_crtc_state->stream);
8150 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8151 		/*
8152 		 * TODO: DC modifies the surface during this call so we need
8153 		 * to lock here - find a way to do this without locking.
8154 		 */
8155 		mutex_lock(&dm->dc_lock);
8156 		update_type = dc_check_update_surfaces_for_stream(
8157 				dc, bundle->surface_updates, num_plane,
8158 				&bundle->stream_update, status);
8159 		mutex_unlock(&dm->dc_lock);
8160 
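		/*
		 * Note: enum surface_update_type is ordered FAST < MED < FULL,
		 * so any result above MED is promoted to FULL below and the
		 * scan ends early.
		 */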
8161 		if (update_type > UPDATE_TYPE_MED) {
8162 			update_type = UPDATE_TYPE_FULL;
8163 			goto cleanup;
8164 		}
8165 	}
8166 
8167 cleanup:
8168 	kfree(bundle);
8169 
8170 	*out_type = update_type;
8171 	return ret;
8172 }
8173 
8174 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8175 {
8176 	struct drm_connector *connector;
8177 	struct drm_connector_state *conn_state;
8178 	struct amdgpu_dm_connector *aconnector = NULL;
8179 	int i;
8180 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8181 		if (conn_state->crtc != crtc)
8182 			continue;
8183 
8184 		aconnector = to_amdgpu_dm_connector(connector);
8185 		if (!aconnector->port || !aconnector->mst_port)
8186 			aconnector = NULL;
8187 		else
8188 			break;
8189 	}
8190 
8191 	if (!aconnector)
8192 		return 0;
8193 
8194 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8195 }
8196 
8197 /**
8198  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8199  * @dev: The DRM device
8200  * @state: The atomic state to commit
8201  *
8202  * Validate that the given atomic state is programmable by DC into hardware.
8203  * This involves constructing a &struct dc_state reflecting the new hardware
8204  * state we wish to commit, then querying DC to see if it is programmable. It's
8205  * important not to modify the existing DC state. Otherwise, atomic_check
8206  * may unexpectedly commit hardware changes.
8207  *
8208  * When validating the DC state, it's important that the right locks are
8209  * acquired. For the full-update case, which removes/adds/updates streams on
8210  * one CRTC while flipping on another CRTC, acquiring the global lock will
8211  * guarantee that any such full-update commit will wait for completion of any
8212  * outstanding flip using DRM's synchronization events. See
8213  * dm_determine_update_type_for_commit().
8214  *
8215  * Note that DM adds the affected connectors for all CRTCs in state, even when
8216  * that might not seem necessary. This is because DC stream creation requires the
8217  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8218  * be possible but non-trivial - a possible TODO item.
8219  *
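 * A rough outline of the sequence implemented below (for orientation;
 * the code is authoritative):
 *   1. drm_atomic_helper_check_modeset()
 *   2. remove changed/disabled streams and planes from the DC context
 *   3. add updated/enabled streams and planes back to the DC context
 *   4. drm_atomic_helper_check_planes()
 *   5. classify the update as fast or full
 *   6. for full updates: take the global lock, then run the MST checks
 *      and dc_validate_global_state()
 *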
8220  * Return: 0 on success, or a negative error code if validation failed.
8221  */
8222 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8223 				  struct drm_atomic_state *state)
8224 {
8225 	struct amdgpu_device *adev = dev->dev_private;
8226 	struct dm_atomic_state *dm_state = NULL;
8227 	struct dc *dc = adev->dm.dc;
8228 	struct drm_connector *connector;
8229 	struct drm_connector_state *old_con_state, *new_con_state;
8230 	struct drm_crtc *crtc;
8231 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8232 	struct drm_plane *plane;
8233 	struct drm_plane_state *old_plane_state, *new_plane_state;
8234 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8235 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8236 
8237 	int ret, i;
8238 
8239 	/*
8240 	 * This bool will be set to true for any modeset/reset
8241 	 * or plane update that implies a non-fast surface update.
8242 	 */
8243 	bool lock_and_validation_needed = false;
8244 
8245 	ret = drm_atomic_helper_check_modeset(dev, state);
8246 	if (ret)
8247 		goto fail;
8248 
8249 	if (adev->asic_type >= CHIP_NAVI10) {
8250 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8251 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8252 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8253 				if (ret)
8254 					goto fail;
8255 			}
8256 		}
8257 	}
8258 
8259 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8260 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8261 		    !new_crtc_state->color_mgmt_changed &&
8262 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8263 			continue;
8264 
8265 		if (!new_crtc_state->enable)
8266 			continue;
8267 
8268 		ret = drm_atomic_add_affected_connectors(state, crtc);
8269 		if (ret)
8270 			return ret;
8271 
8272 		ret = drm_atomic_add_affected_planes(state, crtc);
8273 		if (ret)
8274 			goto fail;
8275 	}
8276 
8277 	/*
8278 	 * Add all primary and overlay planes on the CRTC to the state
8279 	 * whenever a plane is enabled to maintain correct z-ordering
8280 	 * and to enable fast surface updates.
8281 	 */
8282 	drm_for_each_crtc(crtc, dev) {
8283 		bool modified = false;
8284 
8285 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8286 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8287 				continue;
8288 
8289 			if (new_plane_state->crtc == crtc ||
8290 			    old_plane_state->crtc == crtc) {
8291 				modified = true;
8292 				break;
8293 			}
8294 		}
8295 
8296 		if (!modified)
8297 			continue;
8298 
8299 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8300 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8301 				continue;
8302 
8303 			new_plane_state =
8304 				drm_atomic_get_plane_state(state, plane);
8305 
8306 			if (IS_ERR(new_plane_state)) {
8307 				ret = PTR_ERR(new_plane_state);
8308 				goto fail;
8309 			}
8310 		}
8311 	}
8312 
8313 	/* Remove existing planes if they are modified */
8314 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8315 		ret = dm_update_plane_state(dc, state, plane,
8316 					    old_plane_state,
8317 					    new_plane_state,
8318 					    false,
8319 					    &lock_and_validation_needed);
8320 		if (ret)
8321 			goto fail;
8322 	}
8323 
8324 	/* Disable all crtcs which require disable */
8325 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8326 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8327 					   old_crtc_state,
8328 					   new_crtc_state,
8329 					   false,
8330 					   &lock_and_validation_needed);
8331 		if (ret)
8332 			goto fail;
8333 	}
8334 
8335 	/* Enable all crtcs which require enable */
8336 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8337 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8338 					   old_crtc_state,
8339 					   new_crtc_state,
8340 					   true,
8341 					   &lock_and_validation_needed);
8342 		if (ret)
8343 			goto fail;
8344 	}
8345 
8346 	/* Add new/modified planes */
8347 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8348 		ret = dm_update_plane_state(dc, state, plane,
8349 					    old_plane_state,
8350 					    new_plane_state,
8351 					    true,
8352 					    &lock_and_validation_needed);
8353 		if (ret)
8354 			goto fail;
8355 	}
8356 
8357 	/* Run this here since we want to validate the streams we created */
8358 	ret = drm_atomic_helper_check_planes(dev, state);
8359 	if (ret)
8360 		goto fail;
8361 
8362 	if (state->legacy_cursor_update) {
8363 		/*
8364 		 * This is a fast cursor update coming from the plane update
8365 		 * helper, check if it can be done asynchronously for better
8366 		 * performance.
8367 		 */
8368 		state->async_update =
8369 			!drm_atomic_helper_async_check(dev, state);
8370 
8371 		/*
8372 		 * Skip the remaining global validation if this is an async
8373 		 * update. Cursor updates can be done without affecting
8374 		 * state or bandwidth calcs and this avoids the performance
8375 		 * penalty of locking the private state object and
8376 		 * allocating a new dc_state.
8377 		 */
8378 		if (state->async_update)
8379 			return 0;
8380 	}
8381 
8382 	/* Check scaling and underscan changes */
8383 	/* TODO: Scaling-change validation was removed due to the inability to
8384 	 * commit a new stream into the context w/o causing a full reset. Need
8385 	 * to decide how to handle this.
8386 	 */
8387 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8388 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8389 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8390 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8391 
8392 		/* Skip any modesets/resets */
8393 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8394 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8395 			continue;
8396 
8397 		/* Skip anything that is not a scaling or underscan change */
8398 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8399 			continue;
8400 
8401 		overall_update_type = UPDATE_TYPE_FULL;
8402 		lock_and_validation_needed = true;
8403 	}
8404 
8405 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8406 	if (ret)
8407 		goto fail;
8408 
8409 	if (overall_update_type < update_type)
8410 		overall_update_type = update_type;
8411 
8412 	/*
8413 	 * lock_and_validation_needed was an old way to determine if we need to
8414 	 * take the global lock. Leaving it in to check if we broke any corner cases:
8415 	 *   lock_and_validation_needed true  => UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8416 	 *   lock_and_validation_needed false => UPDATE_TYPE_FAST
8417 	 */
8418 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8419 		WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8420 
8421 	if (overall_update_type > UPDATE_TYPE_FAST) {
8422 		ret = dm_atomic_get_state(state, &dm_state);
8423 		if (ret)
8424 			goto fail;
8425 
8426 		ret = do_aquire_global_lock(dev, state);
8427 		if (ret)
8428 			goto fail;
8429 
8430 #if defined(CONFIG_DRM_AMD_DC_DCN)
8431 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8432 			goto fail;
8433 
8434 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8435 		if (ret)
8436 			goto fail;
8437 #endif
8438 
8439 		/*
8440 		 * Perform validation of MST topology in the state:
8441 		 * We need to perform MST atomic check before calling
8442 		 * dc_validate_global_state(), or we risk getting stuck
8443 		 * in an infinite loop and eventually hanging.
8444 		 */
8445 		ret = drm_dp_mst_atomic_check(state);
8446 		if (ret)
8447 			goto fail;
8448 
8449 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8450 			ret = -EINVAL;
8451 			goto fail;
8452 		}
8453 	} else {
8454 		/*
8455 		 * The commit is a fast update. Fast updates shouldn't change
8456 		 * the DC context or affect global validation, and can have their
8457 		 * commit work done in parallel with other commits not touching
8458 		 * the same resource. If we have a new DC context as part of
8459 		 * the DM atomic state from validation we need to free it and
8460 		 * retain the existing one instead.
8461 		 */
8462 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8463 
8464 		new_dm_state = dm_atomic_get_new_state(state);
8465 		old_dm_state = dm_atomic_get_old_state(state);
8466 
8467 		if (new_dm_state && old_dm_state) {
8468 			if (new_dm_state->context)
8469 				dc_release_state(new_dm_state->context);
8470 
8471 			new_dm_state->context = old_dm_state->context;
8472 
8473 			if (old_dm_state->context)
8474 				dc_retain_state(old_dm_state->context);
8475 		}
8476 	}
8477 
8478 	/* Store the overall update type for use later in atomic check. */
8479 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8480 		struct dm_crtc_state *dm_new_crtc_state =
8481 			to_dm_crtc_state(new_crtc_state);
8482 
8483 		dm_new_crtc_state->update_type = (int)overall_update_type;
8484 	}
8485 
8486 	/* Must be success */
8487 	WARN_ON(ret);
8488 	return ret;
8489 
8490 fail:
8491 	if (ret == -EDEADLK)
8492 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8493 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8494 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8495 	else
8496 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8497 
8498 	return ret;
8499 }
8500 
8501 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8502 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8503 {
8504 	uint8_t dpcd_data;
8505 	bool capable = false;
8506 
8507 	if (amdgpu_dm_connector->dc_link &&
8508 		dm_helpers_dp_read_dpcd(
8509 				NULL,
8510 				amdgpu_dm_connector->dc_link,
8511 				DP_DOWN_STREAM_PORT_COUNT,
8512 				&dpcd_data,
8513 				sizeof(dpcd_data))) {
8514 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8515 	}
8516 
8517 	return capable;
8518 }
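
/*
 * Note: DP_DOWN_STREAM_PORT_COUNT lives at DPCD address 0x007; its
 * DP_MSA_TIMING_PAR_IGNORED bit (bit 6) indicates the sink can ignore
 * the MSA timing parameters, a prerequisite for variable refresh.
 */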
8519 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8520 					struct edid *edid)
8521 {
8522 	int i;
8523 	bool edid_check_required;
8524 	struct detailed_timing *timing;
8525 	struct detailed_non_pixel *data;
8526 	struct detailed_data_monitor_range *range;
8527 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8528 			to_amdgpu_dm_connector(connector);
8529 	struct dm_connector_state *dm_con_state = NULL;
8530 
8531 	struct drm_device *dev = connector->dev;
8532 	struct amdgpu_device *adev = dev->dev_private;
8533 	bool freesync_capable = false;
8534 
8535 	if (!connector->state) {
8536 		DRM_ERROR("%s - Connector has no state", __func__);
8537 		goto update;
8538 	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add freesync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * The EDID is known to be non-NULL at this point, so restrict
	 * FreeSync support to DP and eDP sinks that can ignore the MSA
	 * timing parameters.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP)
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc, amdgpu_dm_connector);
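
	/* Monitor range descriptors are only parsed from EDIDs newer than 1.1. */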
	if (edid_check_required && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;

			/* Check if the monitor has a continuous frequency mode. */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;

			/*
			 * Check for range limits only: if flags == 1, the
			 * descriptor provides no additional timing
			 * information. Default GTF, GTF secondary curve and
			 * CVT are not supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
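			/*
			 * The EDID range descriptor stores the maximum pixel
			 * clock in units of 10 MHz, so scale it to MHz.
			 */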
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

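		/*
		 * Only advertise FreeSync when the monitor reports a usable
		 * VRR window, i.e. a refresh range wider than 10 Hz.
		 */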
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

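/*
 * PSR capability is advertised in the eDP sink's DPCD: a non-zero value at
 * DP_PSR_SUPPORT indicates which PSR version the panel implements.
 */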
static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = !!dpcd_data[0];
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link for a stream
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc;
	bool ret = false;

	if (!stream)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

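	/* Only configure PSR when the DMCU firmware reports a non-zero PSR version. */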
	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable PSR in firmware
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Number of static frames before generating the interrupt to enter
	 * PSR; start from a fail-safe default of 2 static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);
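	/*
	 * For example, a 1920x1080@60 mode with pix_clk_100hz = 1485000
	 * (148.5 MHz) and a 2200x1125 total gives
	 * 148500000 / 1125 / 2200 = 60 Hz.
	 */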

	/*
	 * Round up: choose the number of frames such that at least 30 ms of
	 * static time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
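	/*
	 * At 60 Hz, for example, frame_time_microsec = 16666, giving
	 * num_frames_static = 30000 / 16666 + 1 = 2 frames, i.e. roughly
	 * 33 ms of static screen before PSR entry.
	 */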

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;
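
	/*
	 * Program DC so that cursor, overlay and surface updates count as
	 * screen activity for static-screen detection, with
	 * num_frames_static idle frames required before PSR can engage.
	 */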
	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable PSR in firmware
 * @stream: stream state
 *
 * Return: true on success, false otherwise.
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}