1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* Removes and deallocates the drm structures created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);

/*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 	if (crtc >= adev->mode_info.num_crtc)
186 		return 0;
187 	else {
188 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
194 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 				  crtc);
196 			return 0;
197 		}
198 
199 		return dc_stream_get_vblank_counter(acrtc_state->stream);
200 	}
201 }
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
		if (acrtc_state->stream == NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
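		/* Pack results into the legacy register layout: vertical in
		 * the low 16 bits, horizontal in the high 16 bits (likewise
		 * for the vblank start/end pair).
		 */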
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
233 	}
234 
235 	return 0;
236 }
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
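	/* Pageflip IRQ sources are enumerated per OTG instance, so the offset
	 * from the pageflip IRQ base recovers the instance for this interrupt.
	 */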
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
310 	/* IRQ could occur when in initial stage */
311 	/* TODO work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
321 						 amdgpu_crtc->pflip_status,
322 						 AMDGPU_FLIP_SUBMITTED,
323 						 amdgpu_crtc->crtc_id,
324 						 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
	WARN_ON(!e);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only when done after front-porch. This will also deliver
		 * the page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 
459 	if (acrtc) {
460 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
461 
462 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463 			      acrtc->crtc_id,
464 			      amdgpu_dm_vrr_active(acrtc_state));
465 
		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only then will vblank timestamping give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
471 		if (!amdgpu_dm_vrr_active(acrtc_state))
472 			drm_crtc_handle_vblank(&acrtc->base);
473 
		/* The following must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
477 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478 
479 		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480 		    acrtc_state->vrr_params.supported &&
481 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482 			spin_lock_irqsave(&adev->ddev->event_lock, flags);
483 			mod_freesync_handle_v_update(
484 				adev->dm.freesync_module,
485 				acrtc_state->stream,
486 				&acrtc_state->vrr_params);
487 
488 			dc_stream_adjust_vmin_vmax(
489 				adev->dm.dc,
490 				acrtc_state->stream,
491 				&acrtc_state->vrr_params.adjust);
492 			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493 		}
494 	}
495 }
496 
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514 	struct common_irq_params *irq_params = interrupt_params;
515 	struct amdgpu_device *adev = irq_params->adev;
516 	struct amdgpu_crtc *acrtc;
517 	struct dm_crtc_state *acrtc_state;
518 	unsigned long flags;
519 
520 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521 
522 	if (!acrtc)
523 		return;
524 
525 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
526 
527 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528 			 amdgpu_dm_vrr_active(acrtc_state),
529 			 acrtc_state->active_planes);
530 
531 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 	drm_crtc_handle_vblank(&acrtc->base);
533 
534 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
535 
536 	if (acrtc_state->vrr_params.supported &&
537 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);
542 
543 		dc_stream_adjust_vmin_vmax(
544 			adev->dm.dc,
545 			acrtc_state->stream,
546 			&acrtc_state->vrr_params.adjust);
547 	}
548 
549 	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
551 	 * In that case, pageflip completion interrupts won't fire and pageflip
552 	 * completion events won't get delivered. Prevent this by sending
553 	 * pending pageflip events from here if a flip is still pending.
554 	 *
555 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
556 	 * avoid race conditions between flip programming and completion,
557 	 * which could cause too early flip completion events.
558 	 */
559 	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560 	    acrtc_state->active_planes == 0) {
561 		if (acrtc->event) {
562 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563 			acrtc->event = NULL;
564 			drm_crtc_vblank_put(&acrtc->base);
565 		}
566 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 	}
568 
569 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572 
573 static int dm_set_clockgating_state(void *handle,
574 		  enum amd_clockgating_state state)
575 {
576 	return 0;
577 }
578 
579 static int dm_set_powergating_state(void *handle,
580 		  enum amd_powergating_state state)
581 {
582 	return 0;
583 }
584 
585 /* Prototypes of private functions */
static int dm_early_init(void *handle);
587 
/* Allocate memory for FBC compressed data */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591 	struct drm_device *dev = connector->dev;
592 	struct amdgpu_device *adev = dev->dev_private;
593 	struct dm_comressor_info *compressor = &adev->dm.compressor;
594 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595 	struct drm_display_mode *mode;
596 	unsigned long max_size = 0;
597 
598 	if (adev->dm.dc->fbc_compressor == NULL)
599 		return;
600 
601 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602 		return;
603 
604 	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
609 		if (max_size < mode->htotal * mode->vtotal)
610 			max_size = mode->htotal * mode->vtotal;
611 	}
612 
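	/* Reserve enough for the largest listed mode, at 4 bytes per pixel. */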
613 	if (max_size) {
614 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616 			    &compressor->gpu_addr, &compressor->cpu_addr);
617 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
628 
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630 					  int pipe, bool *enabled,
631 					  unsigned char *buf, int max_bytes)
632 {
633 	struct drm_device *dev = dev_get_drvdata(kdev);
634 	struct amdgpu_device *adev = dev->dev_private;
635 	struct drm_connector *connector;
636 	struct drm_connector_list_iter conn_iter;
637 	struct amdgpu_dm_connector *aconnector;
638 	int ret = 0;
639 
640 	*enabled = false;
641 
642 	mutex_lock(&adev->dm.audio_lock);
643 
644 	drm_connector_list_iter_begin(dev, &conn_iter);
645 	drm_for_each_connector_iter(connector, &conn_iter) {
646 		aconnector = to_amdgpu_dm_connector(connector);
647 		if (aconnector->audio_inst != port)
648 			continue;
649 
650 		*enabled = true;
651 		ret = drm_eld_size(connector->eld);
652 		memcpy(buf, connector->eld, min(max_bytes, ret));
653 
654 		break;
655 	}
656 	drm_connector_list_iter_end(&conn_iter);
657 
658 	mutex_unlock(&adev->dm.audio_lock);
659 
660 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 
662 	return ret;
663 }
664 
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666 	.get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668 
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670 				       struct device *hda_kdev, void *data)
671 {
672 	struct drm_device *dev = dev_get_drvdata(kdev);
673 	struct amdgpu_device *adev = dev->dev_private;
674 	struct drm_audio_component *acomp = data;
675 
676 	acomp->ops = &amdgpu_dm_audio_component_ops;
677 	acomp->dev = kdev;
678 	adev->dm.audio_component = acomp;
679 
680 	return 0;
681 }
682 
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684 					  struct device *hda_kdev, void *data)
685 {
686 	struct drm_device *dev = dev_get_drvdata(kdev);
687 	struct amdgpu_device *adev = dev->dev_private;
688 	struct drm_audio_component *acomp = data;
689 
690 	acomp->ops = NULL;
691 	acomp->dev = NULL;
692 	adev->dm.audio_component = NULL;
693 }
694 
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696 	.bind	= amdgpu_dm_audio_component_bind,
697 	.unbind	= amdgpu_dm_audio_component_unbind,
698 };
699 
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702 	int i, ret;
703 
704 	if (!amdgpu_audio)
705 		return 0;
706 
707 	adev->mode_info.audio.enabled = true;
708 
709 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710 
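	/* Start every audio pin disconnected, with format fields unknown. */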
711 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712 		adev->mode_info.audio.pin[i].channels = -1;
713 		adev->mode_info.audio.pin[i].rate = -1;
714 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
715 		adev->mode_info.audio.pin[i].status_bits = 0;
716 		adev->mode_info.audio.pin[i].category_code = 0;
717 		adev->mode_info.audio.pin[i].connected = false;
718 		adev->mode_info.audio.pin[i].id =
719 			adev->dm.dc->res_pool->audios[i]->inst;
720 		adev->mode_info.audio.pin[i].offset = 0;
721 	}
722 
723 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 	if (ret < 0)
725 		return ret;
726 
727 	adev->dm.audio_registered = true;
728 
729 	return 0;
730 }
731 
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734 	if (!amdgpu_audio)
735 		return;
736 
737 	if (!adev->mode_info.audio.enabled)
738 		return;
739 
740 	if (adev->dm.audio_registered) {
741 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742 		adev->dm.audio_registered = false;
743 	}
744 
745 	/* TODO: Disable audio? */
746 
747 	adev->mode_info.audio.enabled = false;
748 }
749 
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752 	struct drm_audio_component *acomp = adev->dm.audio_component;
753 
754 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756 
757 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 						 pin, -1);
759 	}
760 }
761 
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764 	const struct dmcub_firmware_header_v1_0 *hdr;
765 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
768 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769 	struct abm *abm = adev->dm.dc->res_pool->abm;
770 	struct dmub_srv_hw_params hw_params;
771 	enum dmub_status status;
772 	const unsigned char *fw_inst_const, *fw_bss_data;
773 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 	bool has_hw_support;
775 
776 	if (!dmub_srv)
777 		/* DMUB isn't supported on the ASIC. */
778 		return 0;
779 
780 	if (!fb_info) {
781 		DRM_ERROR("No framebuffer info for DMUB service.\n");
782 		return -EINVAL;
783 	}
784 
785 	if (!dmub_fw) {
786 		/* Firmware required for DMUB support. */
787 		DRM_ERROR("No firmware provided for DMUB.\n");
788 		return -EINVAL;
789 	}
790 
791 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792 	if (status != DMUB_STATUS_OK) {
793 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 		return -EINVAL;
795 	}
796 
797 	if (!has_hw_support) {
798 		DRM_INFO("DMUB unsupported on ASIC\n");
799 		return 0;
800 	}
801 
802 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803 
804 	fw_inst_const = dmub_fw->data +
805 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 			PSP_HEADER_BYTES;
807 
808 	fw_bss_data = dmub_fw->data +
809 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810 		      le32_to_cpu(hdr->inst_const_bytes);
811 
812 	/* Copy firmware and bios info into FB memory. */
813 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815 
816 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
817 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
	 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
	 * will be done here in dm_dmub_hw_init.
	 */
823 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 				fw_inst_const_size);
826 	}
827 
828 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
829 	       fw_bss_data_size);
830 
	/* Copy the vbios into FB memory. */
832 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
833 	       adev->bios_size);
834 
835 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
841 
842 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
843 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
844 
845 	/* Initialize hardware. */
846 	memset(&hw_params, 0, sizeof(hw_params));
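	/* Point the firmware at the framebuffer: the GPU base address and
	 * the CPU-visible aperture base.
	 */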
847 	hw_params.fb_base = adev->gmc.fb_start;
848 	hw_params.fb_offset = adev->gmc.aper_base;
849 
850 	/* backdoor load firmware and trigger dmub running */
851 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
852 		hw_params.load_inst_const = true;
853 
854 	if (dmcu)
855 		hw_params.psp_version = dmcu->psp_version;
856 
857 	for (i = 0; i < fb_info->num_fb; ++i)
858 		hw_params.fb[i] = &fb_info->fb[i];
859 
860 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
861 	if (status != DMUB_STATUS_OK) {
862 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 		return -EINVAL;
864 	}
865 
866 	/* Wait for firmware load to finish. */
867 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
868 	if (status != DMUB_STATUS_OK)
869 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
870 
871 	/* Init DMCU and ABM if available. */
872 	if (dmcu && abm) {
873 		dmcu->funcs->dmcu_init(dmcu);
874 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
875 	}
876 
877 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
878 	if (!adev->dm.dc->ctx->dmub_srv) {
879 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 		return -ENOMEM;
881 	}
882 
883 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
884 		 adev->dm.dmcub_fw_version);
885 
886 	return 0;
887 }
888 
889 static int amdgpu_dm_init(struct amdgpu_device *adev)
890 {
891 	struct dc_init_data init_data;
892 #ifdef CONFIG_DRM_AMD_DC_HDCP
893 	struct dc_callback_init init_params;
894 #endif
895 	int r;
896 
897 	adev->dm.ddev = adev->ddev;
898 	adev->dm.adev = adev;
899 
900 	/* Zero all the fields */
901 	memset(&init_data, 0, sizeof(init_data));
902 #ifdef CONFIG_DRM_AMD_DC_HDCP
903 	memset(&init_params, 0, sizeof(init_params));
904 #endif
905 
906 	mutex_init(&adev->dm.dc_lock);
907 	mutex_init(&adev->dm.audio_lock);
908 
	if (amdgpu_dm_irq_init(adev)) {
910 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
911 		goto error;
912 	}
913 
914 	init_data.asic_id.chip_family = adev->family;
915 
916 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
917 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
918 
919 	init_data.asic_id.vram_width = adev->gmc.vram_width;
920 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
921 	init_data.asic_id.atombios_base_address =
922 		adev->mode_info.atom_context->bios;
923 
924 	init_data.driver = adev;
925 
926 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
927 
928 	if (!adev->dm.cgs_device) {
929 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
930 		goto error;
931 	}
932 
933 	init_data.cgs_device = adev->dm.cgs_device;
934 
935 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
936 
937 	switch (adev->asic_type) {
938 	case CHIP_CARRIZO:
939 	case CHIP_STONEY:
940 	case CHIP_RAVEN:
941 	case CHIP_RENOIR:
942 		init_data.flags.gpu_vm_support = true;
943 		break;
944 	default:
945 		break;
946 	}
947 
948 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
949 		init_data.flags.fbc_support = true;
950 
951 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
952 		init_data.flags.multi_mon_pp_mclk_switch = true;
953 
954 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
955 		init_data.flags.disable_fractional_pwm = true;
956 
957 	init_data.flags.power_down_display_on_boot = true;
958 
959 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
960 
961 	/* Display Core create. */
962 	adev->dm.dc = dc_create(&init_data);
963 
964 	if (adev->dm.dc) {
965 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
966 	} else {
967 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
968 		goto error;
969 	}
970 
971 	r = dm_dmub_hw_init(adev);
972 	if (r) {
973 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
974 		goto error;
975 	}
976 
977 	dc_hardware_init(adev->dm.dc);
978 
979 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
986 
987 	amdgpu_dm_init_color_mod();
988 
989 #ifdef CONFIG_DRM_AMD_DC_HDCP
990 	if (adev->asic_type >= CHIP_RAVEN) {
991 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
992 
993 		if (!adev->dm.hdcp_workqueue)
994 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
995 		else
996 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
997 
998 		dc_init_callbacks(adev->dm.dc, &init_params);
999 	}
1000 #endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}
1006 
	/* Update the actual number of crtcs in use */
1008 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1009 
1010 	/* TODO: Add_display_info? */
1011 
1012 	/* TODO use dynamic cursor width */
1013 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1014 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1015 
	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
1021 
1022 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1023 
1024 	return 0;
1025 error:
1026 	amdgpu_dm_fini(adev);
1027 
1028 	return -EINVAL;
1029 }
1030 
1031 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1032 {
1033 	amdgpu_dm_audio_fini(adev);
1034 
1035 	amdgpu_dm_destroy_drm_device(&adev->dm);
1036 
1037 #ifdef CONFIG_DRM_AMD_DC_HDCP
1038 	if (adev->dm.hdcp_workqueue) {
1039 		hdcp_destroy(adev->dm.hdcp_workqueue);
1040 		adev->dm.hdcp_workqueue = NULL;
1041 	}
1042 
1043 	if (adev->dm.dc)
1044 		dc_deinit_callbacks(adev->dm.dc);
1045 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1047 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1048 		adev->dm.dc->ctx->dmub_srv = NULL;
1049 	}
1050 
1051 	if (adev->dm.dmub_bo)
1052 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1053 				      &adev->dm.dmub_bo_gpu_addr,
1054 				      &adev->dm.dmub_bo_cpu_addr);
1055 
1056 	/* DC Destroy TODO: Replace destroy DAL */
1057 	if (adev->dm.dc)
1058 		dc_destroy(&adev->dm.dc);
1059 	/*
	 * TODO: pageflip, vblank interrupt
1061 	 *
1062 	 * amdgpu_dm_irq_fini(adev);
1063 	 */
1064 
1065 	if (adev->dm.cgs_device) {
1066 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1067 		adev->dm.cgs_device = NULL;
1068 	}
1069 	if (adev->dm.freesync_module) {
1070 		mod_freesync_destroy(adev->dm.freesync_module);
1071 		adev->dm.freesync_module = NULL;
1072 	}
1073 
	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}
1079 
1080 static int load_dmcu_fw(struct amdgpu_device *adev)
1081 {
1082 	const char *fw_name_dmcu = NULL;
1083 	int r;
1084 	const struct dmcu_firmware_header_v1_0 *hdr;
1085 
	switch (adev->asic_type) {
1087 	case CHIP_BONAIRE:
1088 	case CHIP_HAWAII:
1089 	case CHIP_KAVERI:
1090 	case CHIP_KABINI:
1091 	case CHIP_MULLINS:
1092 	case CHIP_TONGA:
1093 	case CHIP_FIJI:
1094 	case CHIP_CARRIZO:
1095 	case CHIP_STONEY:
1096 	case CHIP_POLARIS11:
1097 	case CHIP_POLARIS10:
1098 	case CHIP_POLARIS12:
1099 	case CHIP_VEGAM:
1100 	case CHIP_VEGA10:
1101 	case CHIP_VEGA12:
1102 	case CHIP_VEGA20:
1103 	case CHIP_NAVI10:
1104 	case CHIP_NAVI14:
1105 	case CHIP_RENOIR:
1106 		return 0;
1107 	case CHIP_NAVI12:
1108 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1109 		break;
1110 	case CHIP_RAVEN:
1111 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1112 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1113 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1114 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1115 		else
1116 			return 0;
1117 		break;
1118 	default:
1119 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1124 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1125 		return 0;
1126 	}
1127 
1128 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1129 	if (r == -ENOENT) {
1130 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1131 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1132 		adev->dm.fw_dmcu = NULL;
1133 		return 0;
1134 	}
1135 	if (r) {
1136 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1137 			fw_name_dmcu);
1138 		return r;
1139 	}
1140 
1141 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1142 	if (r) {
1143 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1144 			fw_name_dmcu);
1145 		release_firmware(adev->dm.fw_dmcu);
1146 		adev->dm.fw_dmcu = NULL;
1147 		return r;
1148 	}
1149 
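	/* The DMCU image carries two regions, ERAM code and the interrupt
	 * vectors (INTV); register both with the PSP loader below.
	 */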
1150 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1151 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1152 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1153 	adev->firmware.fw_size +=
1154 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1155 
1156 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1157 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1158 	adev->firmware.fw_size +=
1159 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1160 
1161 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1162 
1163 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1164 
1165 	return 0;
1166 }
1167 
1168 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1169 {
1170 	struct amdgpu_device *adev = ctx;
1171 
1172 	return dm_read_reg(adev->dm.dc->ctx, address);
1173 }
1174 
1175 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1176 				     uint32_t value)
1177 {
1178 	struct amdgpu_device *adev = ctx;
1179 
1180 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1181 }
1182 
1183 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1184 {
1185 	struct dmub_srv_create_params create_params;
1186 	struct dmub_srv_region_params region_params;
1187 	struct dmub_srv_region_info region_info;
1188 	struct dmub_srv_fb_params fb_params;
1189 	struct dmub_srv_fb_info *fb_info;
1190 	struct dmub_srv *dmub_srv;
1191 	const struct dmcub_firmware_header_v1_0 *hdr;
1192 	const char *fw_name_dmub;
1193 	enum dmub_asic dmub_asic;
1194 	enum dmub_status status;
1195 	int r;
1196 
1197 	switch (adev->asic_type) {
1198 	case CHIP_RENOIR:
1199 		dmub_asic = DMUB_ASIC_DCN21;
1200 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1201 		break;
1202 
1203 	default:
1204 		/* ASIC doesn't support DMUB. */
1205 		return 0;
1206 	}
1207 
1208 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1209 	if (r) {
1210 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1211 		return 0;
1212 	}
1213 
1214 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1215 	if (r) {
1216 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1217 		return 0;
1218 	}
1219 
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the version up front so the PSP path below logs it correctly. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1235 
1236 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1237 	dmub_srv = adev->dm.dmub_srv;
1238 
1239 	if (!dmub_srv) {
1240 		DRM_ERROR("Failed to allocate DMUB service!\n");
1241 		return -ENOMEM;
1242 	}
1243 
1244 	memset(&create_params, 0, sizeof(create_params));
1245 	create_params.user_ctx = adev;
1246 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1247 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1248 	create_params.asic = dmub_asic;
1249 
1250 	/* Create the DMUB service. */
1251 	status = dmub_srv_create(dmub_srv, &create_params);
1252 	if (status != DMUB_STATUS_OK) {
1253 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1254 		return -EINVAL;
1255 	}
1256 
1257 	/* Calculate the size of all the regions for the DMUB service. */
1258 	memset(&region_params, 0, sizeof(region_params));
1259 
1260 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1261 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1262 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1263 	region_params.vbios_size = adev->bios_size;
1264 	region_params.fw_bss_data =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		le32_to_cpu(hdr->inst_const_bytes);
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	if (adev->dm.dmub_fw) {
1338 		release_firmware(adev->dm.dmub_fw);
1339 		adev->dm.dmub_fw = NULL;
1340 	}
1341 
	if (adev->dm.fw_dmcu) {
1343 		release_firmware(adev->dm.fw_dmcu);
1344 		adev->dm.fw_dmcu = NULL;
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1351 {
1352 	struct amdgpu_dm_connector *aconnector;
1353 	struct drm_connector *connector;
1354 	struct drm_connector_list_iter iter;
1355 	int ret = 0;
1356 
1357 	drm_connector_list_iter_begin(dev, &iter);
1358 	drm_for_each_connector_iter(connector, &iter) {
1359 		aconnector = to_amdgpu_dm_connector(connector);
1360 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1361 		    aconnector->mst_mgr.aux) {
1362 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1363 					 aconnector,
1364 					 aconnector->base.base.id);
1365 
1366 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1367 			if (ret < 0) {
1368 				DRM_ERROR("DM_MST: Failed to start MST\n");
1369 				aconnector->dc_link->type =
1370 					dc_connection_single;
1371 				break;
1372 			}
1373 		}
1374 	}
1375 	drm_connector_list_iter_end(&iter);
1376 
1377 	return ret;
1378 }
1379 
1380 static int dm_late_init(void *handle)
1381 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;
1389 
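	/* Build a 16-entry linear backlight LUT spanning 0x0000..0xFFFF. */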
1390 	for (i = 0; i < 16; i++)
1391 		linear_lut[i] = 0xFFFF * i / 15;
1392 
1393 	params.set = 0;
1394 	params.backlight_ramping_start = 0xCCCC;
1395 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1396 	params.backlight_lut_array_size = 16;
1397 	params.backlight_lut_array = linear_lut;
1398 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1402 	params.min_abm_backlight = 0x28F;
1403 
	/* TODO: will enable for navi10 */
1405 	if (adev->asic_type <= CHIP_RAVEN) {
1406 		ret = dmcu_load_iram(dmcu, params);
1407 
1408 		if (!ret)
1409 			return -EINVAL;
1410 	}
1411 
1412 	return detect_mst_link_for_all_connectors(adev->ddev);
1413 }
1414 
1415 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1416 {
1417 	struct amdgpu_dm_connector *aconnector;
1418 	struct drm_connector *connector;
1419 	struct drm_connector_list_iter iter;
1420 	struct drm_dp_mst_topology_mgr *mgr;
1421 	int ret;
1422 	bool need_hotplug = false;
1423 
1424 	drm_connector_list_iter_begin(dev, &iter);
1425 	drm_for_each_connector_iter(connector, &iter) {
1426 		aconnector = to_amdgpu_dm_connector(connector);
1427 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1428 		    aconnector->mst_port)
1429 			continue;
1430 
1431 		mgr = &aconnector->mst_mgr;
1432 
1433 		if (suspend) {
1434 			drm_dp_mst_topology_mgr_suspend(mgr);
1435 		} else {
1436 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1437 			if (ret < 0) {
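				/* Resume failed: tear down MST and trigger a
				 * hotplug so userspace re-probes the link.
				 */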
1438 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1439 				need_hotplug = true;
1440 			}
1441 		}
1442 	}
1443 	drm_connector_list_iter_end(&iter);
1444 
1445 	if (need_hotplug)
1446 		drm_kms_helper_hotplug_event(dev);
1447 }
1448 
1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451 	struct smu_context *smu = &adev->smu;
1452 	int ret = 0;
1453 
1454 	if (!is_support_sw_smu(adev))
1455 		return 0;
1456 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates dcn watermark clock settings within
	 * dc_create and dcn20_resource_construct, then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1488 	case CHIP_NAVI10:
1489 	case CHIP_NAVI14:
1490 	case CHIP_NAVI12:
1491 		break;
1492 	default:
1493 		return 0;
1494 	}
1495 
1496 	mutex_lock(&smu->mutex);
1497 
1498 	/* pass data to smu controller */
1499 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1500 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1501 		ret = smu_write_watermarks_table(smu);
1502 
1503 		if (ret) {
1504 			mutex_unlock(&smu->mutex);
1505 			DRM_ERROR("Failed to update WMTABLE!\n");
1506 			return ret;
1507 		}
1508 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1509 	}
1510 
1511 	mutex_unlock(&smu->mutex);
1512 
1513 	return 0;
1514 }
1515 
1516 /**
1517  * dm_hw_init() - Initialize DC device
1518  * @handle: The base driver device containing the amdgpu_dm device.
1519  *
1520  * Initialize the &struct amdgpu_display_manager device. This involves calling
1521  * the initializers of each DM component, then populating the struct with them.
1522  *
1523  * Although the function implies hardware initialization, both hardware and
1524  * software are initialized here. Splitting them out to their relevant init
1525  * hooks is a future TODO item.
1526  *
1527  * Some notable things that are initialized here:
1528  *
1529  * - Display Core, both software and hardware
1530  * - DC modules that we need (freesync and color management)
1531  * - DRM software states
1532  * - Interrupt sources and handlers
1533  * - Vblank support
1534  * - Debug FS entries, if enabled
1535  */
1536 static int dm_hw_init(void *handle)
1537 {
1538 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1539 	/* Create DAL display manager */
1540 	amdgpu_dm_init(adev);
1541 	amdgpu_dm_hpd_init(adev);
1542 
1543 	return 0;
1544 }
1545 
1546 /**
1547  * dm_hw_fini() - Teardown DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Teardown components within &struct amdgpu_display_manager that require
1551  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1552  * were loaded. Also flush IRQ workqueues and disable them.
1553  */
1554 static int dm_hw_fini(void *handle)
1555 {
1556 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1557 
1558 	amdgpu_dm_hpd_fini(adev);
1559 
1560 	amdgpu_dm_irq_fini(adev);
1561 	amdgpu_dm_fini(adev);
1562 	return 0;
1563 }
1564 
1565 static int dm_suspend(void *handle)
1566 {
1567 	struct amdgpu_device *adev = handle;
1568 	struct amdgpu_display_manager *dm = &adev->dm;
1569 	int ret = 0;
1570 
1571 	WARN_ON(adev->dm.cached_state);
1572 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1573 
1574 	s3_handle_mst(adev->ddev, true);
1575 
	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1580 
1581 	return ret;
1582 }
1583 
1584 static struct amdgpu_dm_connector *
1585 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1586 					     struct drm_crtc *crtc)
1587 {
1588 	uint32_t i;
1589 	struct drm_connector_state *new_con_state;
1590 	struct drm_connector *connector;
1591 	struct drm_crtc *crtc_from_state;
1592 
1593 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1594 		crtc_from_state = new_con_state->crtc;
1595 
1596 		if (crtc_from_state == crtc)
1597 			return to_amdgpu_dm_connector(connector);
1598 	}
1599 
1600 	return NULL;
1601 }
1602 
1603 static void emulated_link_detect(struct dc_link *link)
1604 {
1605 	struct dc_sink_init_data sink_init_data = { 0 };
1606 	struct display_sink_capability sink_caps = { 0 };
1607 	enum dc_edid_status edid_status;
1608 	struct dc_context *dc_ctx = link->ctx;
1609 	struct dc_sink *sink = NULL;
1610 	struct dc_sink *prev_sink = NULL;
1611 
1612 	link->type = dc_connection_none;
1613 	prev_sink = link->local_sink;
1614 
1615 	if (prev_sink != NULL)
1616 		dc_sink_retain(prev_sink);
1617 
1618 	switch (link->connector_signal) {
1619 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1620 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1621 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1622 		break;
1623 	}
1624 
1625 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1626 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1627 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1628 		break;
1629 	}
1630 
1631 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1632 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1633 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1634 		break;
1635 	}
1636 
1637 	case SIGNAL_TYPE_LVDS: {
1638 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1639 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1640 		break;
1641 	}
1642 
1643 	case SIGNAL_TYPE_EDP: {
1644 		sink_caps.transaction_type =
1645 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1646 		sink_caps.signal = SIGNAL_TYPE_EDP;
1647 		break;
1648 	}
1649 
1650 	case SIGNAL_TYPE_DISPLAY_PORT: {
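		/* An emulated sink cannot train a real DP link, so report
		 * the signal as virtual.
		 */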
1651 		sink_caps.transaction_type =
1652 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1653 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1654 		break;
1655 	}
1656 
1657 	default:
1658 		DC_ERROR("Invalid connector type! signal:%d\n",
1659 			link->connector_signal);
1660 		return;
1661 	}
1662 
1663 	sink_init_data.link = link;
1664 	sink_init_data.sink_signal = sink_caps.signal;
1665 
1666 	sink = dc_sink_create(&sink_init_data);
1667 	if (!sink) {
1668 		DC_ERROR("Failed to create sink!\n");
1669 		return;
1670 	}
1671 
1672 	/* dc_sink_create returns a new reference */
1673 	link->local_sink = sink;
1674 
1675 	edid_status = dm_helpers_read_local_edid(
1676 			link->ctx,
1677 			link,
1678 			sink);
1679 
	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
1684 
1685 static int dm_resume(void *handle)
1686 {
1687 	struct amdgpu_device *adev = handle;
1688 	struct drm_device *ddev = adev->ddev;
1689 	struct amdgpu_display_manager *dm = &adev->dm;
1690 	struct amdgpu_dm_connector *aconnector;
1691 	struct drm_connector *connector;
1692 	struct drm_connector_list_iter iter;
1693 	struct drm_crtc *crtc;
1694 	struct drm_crtc_state *new_crtc_state;
1695 	struct dm_crtc_state *dm_new_crtc_state;
1696 	struct drm_plane *plane;
1697 	struct drm_plane_state *new_plane_state;
1698 	struct dm_plane_state *dm_new_plane_state;
1699 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1700 	enum dc_connection_type new_connection_type = dc_connection_none;
1701 	int i, r;
1702 
1703 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1704 	dc_release_state(dm_state->context);
1705 	dm_state->context = dc_create_state(dm->dc);
1706 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1707 	dc_resource_state_construct(dm->dc, dm_state->context);
1708 
1709 	/* Before powering on DC we need to re-initialize DMUB. */
1710 	r = dm_dmub_hw_init(adev);
1711 	if (r)
1712 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1713 
1714 	/* power on hardware */
1715 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1716 
1717 	/* program HPD filter */
1718 	dc_resume(dm->dc);
1719 
1720 	/*
1721 	 * early enable HPD Rx IRQ, should be done before set mode as short
1722 	 * pulse interrupts are used for MST
1723 	 */
1724 	amdgpu_dm_irq_resume_early(adev);
1725 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
1727 	s3_handle_mst(ddev, false);
1728 
	/* Do detection */
1730 	drm_connector_list_iter_begin(ddev, &iter);
1731 	drm_for_each_connector_iter(connector, &iter) {
1732 		aconnector = to_amdgpu_dm_connector(connector);
1733 
1734 		/*
1735 		 * this is the case when traversing through already created
1736 		 * MST connectors, should be skipped
1737 		 */
1738 		if (aconnector->mst_port)
1739 			continue;
1740 
1741 		mutex_lock(&aconnector->hpd_lock);
1742 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1743 			DRM_ERROR("KMS: Failed to detect connector\n");
1744 
1745 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1746 			emulated_link_detect(aconnector->dc_link);
1747 		else
1748 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1749 
1750 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1751 			aconnector->fake_enable = false;
1752 
1753 		if (aconnector->dc_sink)
1754 			dc_sink_release(aconnector->dc_sink);
1755 		aconnector->dc_sink = NULL;
1756 		amdgpu_dm_update_connector_after_detect(aconnector);
1757 		mutex_unlock(&aconnector->hpd_lock);
1758 	}
1759 	drm_connector_list_iter_end(&iter);
1760 
1761 	/* Force mode set in atomic commit */
1762 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1763 		new_crtc_state->active_changed = true;
1764 
1765 	/*
1766 	 * atomic_check is expected to create the dc states. We need to release
1767 	 * them here, since they were duplicated as part of the suspend
1768 	 * procedure.
1769 	 */
1770 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1771 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1772 		if (dm_new_crtc_state->stream) {
1773 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1774 			dc_stream_release(dm_new_crtc_state->stream);
1775 			dm_new_crtc_state->stream = NULL;
1776 		}
1777 	}
1778 
1779 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1780 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1781 		if (dm_new_plane_state->dc_state) {
1782 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1783 			dc_plane_state_release(dm_new_plane_state->dc_state);
1784 			dm_new_plane_state->dc_state = NULL;
1785 		}
1786 	}
1787 
1788 	drm_atomic_helper_resume(ddev, dm->cached_state);
1789 
1790 	dm->cached_state = NULL;
1791 
1792 	amdgpu_dm_irq_resume_late(adev);
1793 
1794 	amdgpu_dm_smu_write_watermarks_table(adev);
1795 
1796 	return 0;
1797 }
1798 
1799 /**
1800  * DOC: DM Lifecycle
1801  *
1802  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1803  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1804  * the base driver's device list to be initialized and torn down accordingly.
1805  *
1806  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1807  */
1808 
1809 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1810 	.name = "dm",
1811 	.early_init = dm_early_init,
1812 	.late_init = dm_late_init,
1813 	.sw_init = dm_sw_init,
1814 	.sw_fini = dm_sw_fini,
1815 	.hw_init = dm_hw_init,
1816 	.hw_fini = dm_hw_fini,
1817 	.suspend = dm_suspend,
1818 	.resume = dm_resume,
1819 	.is_idle = dm_is_idle,
1820 	.wait_for_idle = dm_wait_for_idle,
1821 	.check_soft_reset = dm_check_soft_reset,
1822 	.soft_reset = dm_soft_reset,
1823 	.set_clockgating_state = dm_set_clockgating_state,
1824 	.set_powergating_state = dm_set_powergating_state,
1825 };
1826 
1827 const struct amdgpu_ip_block_version dm_ip_block =
1828 {
1829 	.type = AMD_IP_BLOCK_TYPE_DCE,
1830 	.major = 1,
1831 	.minor = 0,
1832 	.rev = 0,
1833 	.funcs = &amdgpu_dm_funcs,
1834 };
1835 
1836 
1837 /**
1838  * DOC: atomic
1839  *
1840  * *WIP*
1841  */
1842 
1843 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1844 	.fb_create = amdgpu_display_user_framebuffer_create,
1845 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1846 	.atomic_check = amdgpu_dm_atomic_check,
1847 	.atomic_commit = amdgpu_dm_atomic_commit,
1848 };
1849 
1850 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1851 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1852 };
1853 
1854 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1855 {
1856 	u32 max_cll, min_cll, max, min, q, r;
1857 	struct amdgpu_dm_backlight_caps *caps;
1858 	struct amdgpu_display_manager *dm;
1859 	struct drm_connector *conn_base;
1860 	struct amdgpu_device *adev;
1861 	static const u8 pre_computed_values[] = {
1862 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1863 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1864 
1865 	if (!aconnector || !aconnector->dc_link)
1866 		return;
1867 
1868 	conn_base = &aconnector->base;
1869 	adev = conn_base->dev->dev_private;
1870 	dm = &adev->dm;
1871 	caps = &dm->backlight_caps;
1872 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1873 	caps->aux_support = false;
1874 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1875 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1876 
1877 	if (caps->ext_caps->bits.oled == 1 ||
1878 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1879 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1880 		caps->aux_support = true;
1881 
1882 	/* From the specification (CTA-861-G), for calculating the maximum
1883 	 * luminance we need to use:
1884 	 *	Luminance = 50*2**(CV/32)
1885 	 * Where CV is a one-byte value.
1886 	 * Calculating this expression would normally need floating-point
1887 	 * precision; to avoid that complexity, we take advantage of the fact
1888 	 * that CV is divided by a constant. From Euclid's division algorithm,
1889 	 * we know that CV can be written as: CV = 32*q + r. Replacing CV in
1890 	 * the Luminance expression gives 50*(2**q)*(2**(r/32)), so we just
1891 	 * need to pre-compute the values of 50*(2**(r/32)). For pre-computing
1892 	 * them we used the following Ruby line:
1893 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1894 	 * The results of the above expression can be verified in
1895 	 * pre_computed_values.
1896 	 */
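	/*
	 * Worked example (illustrative): max_cll = 70 gives q = 2 and r = 6,
	 * so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * matches round(50 * 2**(70/32)) = 228.
	 */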
1897 	q = max_cll >> 5;
1898 	r = max_cll % 32;
1899 	max = (1 << q) * pre_computed_values[r];
1900 
1901 	// min luminance: maxLum * (CV/255)^2 / 100
1902 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1903 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
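	/*
	 * Note: since CV is a one-byte value per the comment above,
	 * DIV_ROUND_CLOSEST(min_cll, 255) is only ever 0 or 1, and
	 * DIV_ROUND_CLOSEST(q * q, 100) then rounds to 0, so this integer
	 * math evaluates min to 0.
	 */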
1904 
1905 	caps->aux_max_input_signal = max;
1906 	caps->aux_min_input_signal = min;
1907 }
1908 
1909 void amdgpu_dm_update_connector_after_detect(
1910 		struct amdgpu_dm_connector *aconnector)
1911 {
1912 	struct drm_connector *connector = &aconnector->base;
1913 	struct drm_device *dev = connector->dev;
1914 	struct dc_sink *sink;
1915 
1916 	/* MST handled by drm_mst framework */
1917 	if (aconnector->mst_mgr.mst_state)
1918 		return;
1919 
1921 	sink = aconnector->dc_link->local_sink;
1922 	if (sink)
1923 		dc_sink_retain(sink);
1924 
1925 	/*
1926 	 * EDID-managed connectors get their first update only in the mode_valid
1927 	 * hook; the connector sink is then set to either a fake or a physical
1928 	 * sink, depending on the link status. Skip if already done during boot.
1929 	 */
1930 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1931 			&& aconnector->dc_em_sink) {
1932 
1933 		/*
1934 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
1935 		 * fake a stream, because on resume connector->sink is set to NULL.
1936 		 */
1937 		mutex_lock(&dev->mode_config.mutex);
1938 
1939 		if (sink) {
1940 			if (aconnector->dc_sink) {
1941 				amdgpu_dm_update_freesync_caps(connector, NULL);
1942 				/*
1943 				 * The retain and release below bump up the sink
1944 				 * refcount: the link no longer points to it after
1945 				 * disconnect, so the next crtc-to-connector reshuffle
1946 				 * by the UMD would otherwise trigger an unwanted dc_sink release.
1947 				 */
1948 				dc_sink_release(aconnector->dc_sink);
1949 			}
1950 			aconnector->dc_sink = sink;
1951 			dc_sink_retain(aconnector->dc_sink);
1952 			amdgpu_dm_update_freesync_caps(connector,
1953 					aconnector->edid);
1954 		} else {
1955 			amdgpu_dm_update_freesync_caps(connector, NULL);
1956 			if (!aconnector->dc_sink) {
1957 				aconnector->dc_sink = aconnector->dc_em_sink;
1958 				dc_sink_retain(aconnector->dc_sink);
1959 			}
1960 		}
1961 
1962 		mutex_unlock(&dev->mode_config.mutex);
1963 
1964 		if (sink)
1965 			dc_sink_release(sink);
1966 		return;
1967 	}
1968 
1969 	/*
1970 	 * TODO: temporary guard while looking for a proper fix.
1971 	 * If this sink is an MST sink, we should not do anything.
1972 	 */
1973 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1974 		dc_sink_release(sink);
1975 		return;
1976 	}
1977 
1978 	if (aconnector->dc_sink == sink) {
1979 		/*
1980 		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
1981 		 * Do nothing.
1982 		 */
1983 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1984 				aconnector->connector_id);
1985 		if (sink)
1986 			dc_sink_release(sink);
1987 		return;
1988 	}
1989 
1990 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1991 		aconnector->connector_id, aconnector->dc_sink, sink);
1992 
1993 	mutex_lock(&dev->mode_config.mutex);
1994 
1995 	/*
1996 	 * 1. Update status of the drm connector
1997 	 * 2. Send an event and let userspace tell us what to do
1998 	 */
1999 	if (sink) {
2000 		/*
2001 		 * TODO: check if we still need the S3 mode update workaround.
2002 		 * If yes, put it here.
2003 		 */
2004 		if (aconnector->dc_sink)
2005 			amdgpu_dm_update_freesync_caps(connector, NULL);
2006 
2007 		aconnector->dc_sink = sink;
2008 		dc_sink_retain(aconnector->dc_sink);
2009 		if (sink->dc_edid.length == 0) {
2010 			aconnector->edid = NULL;
2011 			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2012 		} else {
2013 			aconnector->edid =
2014 				(struct edid *) sink->dc_edid.raw_edid;
2015 
2017 			drm_connector_update_edid_property(connector,
2018 					aconnector->edid);
2019 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2020 					    aconnector->edid);
2021 		}
2022 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2023 		update_connector_ext_caps(aconnector);
2024 	} else {
2025 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2026 		amdgpu_dm_update_freesync_caps(connector, NULL);
2027 		drm_connector_update_edid_property(connector, NULL);
2028 		aconnector->num_modes = 0;
2029 		dc_sink_release(aconnector->dc_sink);
2030 		aconnector->dc_sink = NULL;
2031 		aconnector->edid = NULL;
2032 #ifdef CONFIG_DRM_AMD_DC_HDCP
2033 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2034 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2035 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2036 #endif
2037 	}
2038 
2039 	mutex_unlock(&dev->mode_config.mutex);
2040 
2041 	if (sink)
2042 		dc_sink_release(sink);
2043 }
2044 
2045 static void handle_hpd_irq(void *param)
2046 {
2047 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2048 	struct drm_connector *connector = &aconnector->base;
2049 	struct drm_device *dev = connector->dev;
2050 	enum dc_connection_type new_connection_type = dc_connection_none;
2051 #ifdef CONFIG_DRM_AMD_DC_HDCP
2052 	struct amdgpu_device *adev = dev->dev_private;
2053 #endif
2054 
2055 	/*
2056 	 * In case of failure, or for MST, there is no need to update the
2057 	 * connector status or to notify the OS, since MST does this in its own context.
2058 	 */
2059 	mutex_lock(&aconnector->hpd_lock);
2060 
2061 #ifdef CONFIG_DRM_AMD_DC_HDCP
2062 	if (adev->dm.hdcp_workqueue)
2063 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2064 #endif
2065 	if (aconnector->fake_enable)
2066 		aconnector->fake_enable = false;
2067 
2068 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2069 		DRM_ERROR("KMS: Failed to detect connector\n");
2070 
2071 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2072 		emulated_link_detect(aconnector->dc_link);
2073 
2075 		drm_modeset_lock_all(dev);
2076 		dm_restore_drm_connector_state(dev, connector);
2077 		drm_modeset_unlock_all(dev);
2078 
2079 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2080 			drm_kms_helper_hotplug_event(dev);
2081 
2082 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2083 		amdgpu_dm_update_connector_after_detect(aconnector);
2084 
2086 		drm_modeset_lock_all(dev);
2087 		dm_restore_drm_connector_state(dev, connector);
2088 		drm_modeset_unlock_all(dev);
2089 
2090 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2091 			drm_kms_helper_hotplug_event(dev);
2092 	}
2093 	mutex_unlock(&aconnector->hpd_lock);
2095 }
2096 
2097 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2098 {
2099 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2100 	uint8_t dret;
2101 	bool new_irq_handled = false;
2102 	int dpcd_addr;
2103 	int dpcd_bytes_to_read;
2104 
2105 	const int max_process_count = 30;
2106 	int process_count = 0;
2107 
2108 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2109 
2110 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2111 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2112 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2113 		dpcd_addr = DP_SINK_COUNT;
2114 	} else {
2115 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2116 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2117 		dpcd_addr = DP_SINK_COUNT_ESI;
2118 	}
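	/*
	 * Derived from the DPCD addresses above: the legacy read covers
	 * 2 bytes (0x200-0x201), the ESI read covers 4 bytes (0x2002-0x2005).
	 */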
2119 
2120 	dret = drm_dp_dpcd_read(
2121 		&aconnector->dm_dp_aux.aux,
2122 		dpcd_addr,
2123 		esi,
2124 		dpcd_bytes_to_read);
2125 
2126 	while (dret == dpcd_bytes_to_read &&
2127 		process_count < max_process_count) {
2128 		uint8_t retry;

2129 		dret = 0;
2130 
2131 		process_count++;
2132 
2133 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2134 		/* handle HPD short pulse irq */
2135 		if (aconnector->mst_mgr.mst_state)
2136 			drm_dp_mst_hpd_irq(
2137 				&aconnector->mst_mgr,
2138 				esi,
2139 				&new_irq_handled);
2140 
2141 		if (new_irq_handled) {
2142 			/* ACK at DPCD to notify downstream */
2143 			const int ack_dpcd_bytes_to_write =
2144 				dpcd_bytes_to_read - 1;
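			/*
			 * Writing from esi[1] at dpcd_addr + 1 skips the
			 * sink-count byte at offset 0; only the IRQ vector
			 * bytes are acknowledged.
			 */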
2145 
2146 			for (retry = 0; retry < 3; retry++) {
2147 				uint8_t wret;
2148 
2149 				wret = drm_dp_dpcd_write(
2150 					&aconnector->dm_dp_aux.aux,
2151 					dpcd_addr + 1,
2152 					&esi[1],
2153 					ack_dpcd_bytes_to_write);
2154 				if (wret == ack_dpcd_bytes_to_write)
2155 					break;
2156 			}
2157 
2158 			/* check if there is new irq to be handled */
2159 			dret = drm_dp_dpcd_read(
2160 				&aconnector->dm_dp_aux.aux,
2161 				dpcd_addr,
2162 				esi,
2163 				dpcd_bytes_to_read);
2164 
2165 			new_irq_handled = false;
2166 		} else {
2167 			break;
2168 		}
2169 	}
2170 
2171 	if (process_count == max_process_count)
2172 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2173 }
2174 
2175 static void handle_hpd_rx_irq(void *param)
2176 {
2177 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2178 	struct drm_connector *connector = &aconnector->base;
2179 	struct drm_device *dev = connector->dev;
2180 	struct dc_link *dc_link = aconnector->dc_link;
2181 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2182 	enum dc_connection_type new_connection_type = dc_connection_none;
2183 #ifdef CONFIG_DRM_AMD_DC_HDCP
2184 	union hpd_irq_data hpd_irq_data;
2185 	struct amdgpu_device *adev = dev->dev_private;
2186 
2187 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2188 #endif
2189 
2190 	/*
2191 	 * TODO: Temporarily hold a mutex so that the HPD interrupt does not
2192 	 * run into a GPIO conflict; once an i2c helper is implemented, this
2193 	 * mutex should be retired.
2194 	 */
2195 	if (dc_link->type != dc_connection_mst_branch)
2196 		mutex_lock(&aconnector->hpd_lock);
2197 
2199 #ifdef CONFIG_DRM_AMD_DC_HDCP
2200 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2201 #else
2202 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2203 #endif
2204 			!is_mst_root_connector) {
2205 		/* Downstream Port status changed. */
2206 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2207 			DRM_ERROR("KMS: Failed to detect connector\n");
2208 
2209 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2210 			emulated_link_detect(dc_link);
2211 
2212 			if (aconnector->fake_enable)
2213 				aconnector->fake_enable = false;
2214 
2215 			amdgpu_dm_update_connector_after_detect(aconnector);
2216 
2218 			drm_modeset_lock_all(dev);
2219 			dm_restore_drm_connector_state(dev, connector);
2220 			drm_modeset_unlock_all(dev);
2221 
2222 			drm_kms_helper_hotplug_event(dev);
2223 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2224 
2225 			if (aconnector->fake_enable)
2226 				aconnector->fake_enable = false;
2227 
2228 			amdgpu_dm_update_connector_after_detect(aconnector);
2229 
2231 			drm_modeset_lock_all(dev);
2232 			dm_restore_drm_connector_state(dev, connector);
2233 			drm_modeset_unlock_all(dev);
2234 
2235 			drm_kms_helper_hotplug_event(dev);
2236 		}
2237 	}
2238 #ifdef CONFIG_DRM_AMD_DC_HDCP
2239 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2240 		if (adev->dm.hdcp_workqueue)
2241 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2242 	}
2243 #endif
2244 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2245 	    (dc_link->type == dc_connection_mst_branch))
2246 		dm_handle_hpd_rx_irq(aconnector);
2247 
2248 	if (dc_link->type != dc_connection_mst_branch) {
2249 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2250 		mutex_unlock(&aconnector->hpd_lock);
2251 	}
2252 }
2253 
2254 static void register_hpd_handlers(struct amdgpu_device *adev)
2255 {
2256 	struct drm_device *dev = adev->ddev;
2257 	struct drm_connector *connector;
2258 	struct amdgpu_dm_connector *aconnector;
2259 	const struct dc_link *dc_link;
2260 	struct dc_interrupt_params int_params = {0};
2261 
2262 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2263 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2264 
2265 	list_for_each_entry(connector,
2266 			&dev->mode_config.connector_list, head)	{
2267 
2268 		aconnector = to_amdgpu_dm_connector(connector);
2269 		dc_link = aconnector->dc_link;
2270 
2271 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2272 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273 			int_params.irq_source = dc_link->irq_source_hpd;
2274 
2275 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276 					handle_hpd_irq,
2277 					(void *) aconnector);
2278 		}
2279 
2280 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2281 
2282 			/* Also register for DP short pulse (hpd_rx). */
2283 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2284 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2285 
2286 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2287 					handle_hpd_rx_irq,
2288 					(void *) aconnector);
2289 		}
2290 	}
2291 }
2292 
2293 /* Register IRQ sources and initialize IRQ callbacks */
2294 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2295 {
2296 	struct dc *dc = adev->dm.dc;
2297 	struct common_irq_params *c_irq_params;
2298 	struct dc_interrupt_params int_params = {0};
2299 	int r;
2300 	int i;
2301 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2302 
2303 	if (adev->asic_type >= CHIP_VEGA10)
2304 		client_id = SOC15_IH_CLIENTID_DCE;
2305 
2306 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2307 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2308 
2309 	/*
2310 	 * Actions of amdgpu_irq_add_id():
2311 	 * 1. Register a set() function with base driver.
2312 	 *    Base driver will call set() function to enable/disable an
2313 	 *    interrupt in DC hardware.
2314 	 * 2. Register amdgpu_dm_irq_handler().
2315 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2316 	 *    coming from DC hardware.
2317 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2318 	 *    for acknowledging and handling.
	 */
2319 
2320 	/* Use VBLANK interrupt */
2321 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2322 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2323 		if (r) {
2324 			DRM_ERROR("Failed to add crtc irq id!\n");
2325 			return r;
2326 		}
2327 
2328 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2329 		int_params.irq_source =
2330 			dc_interrupt_to_irq_source(dc, i, 0);
2331 
2332 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2333 
2334 		c_irq_params->adev = adev;
2335 		c_irq_params->irq_src = int_params.irq_source;
2336 
2337 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2338 				dm_crtc_high_irq, c_irq_params);
2339 	}
2340 
2341 	/* Use VUPDATE interrupt */
2342 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2343 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2344 		if (r) {
2345 			DRM_ERROR("Failed to add vupdate irq id!\n");
2346 			return r;
2347 		}
2348 
2349 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2350 		int_params.irq_source =
2351 			dc_interrupt_to_irq_source(dc, i, 0);
2352 
2353 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2354 
2355 		c_irq_params->adev = adev;
2356 		c_irq_params->irq_src = int_params.irq_source;
2357 
2358 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2359 				dm_vupdate_high_irq, c_irq_params);
2360 	}
2361 
2362 	/* Use GRPH_PFLIP interrupt */
2363 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2364 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2365 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2366 		if (r) {
2367 			DRM_ERROR("Failed to add page flip irq id!\n");
2368 			return r;
2369 		}
2370 
2371 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2372 		int_params.irq_source =
2373 			dc_interrupt_to_irq_source(dc, i, 0);
2374 
2375 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2376 
2377 		c_irq_params->adev = adev;
2378 		c_irq_params->irq_src = int_params.irq_source;
2379 
2380 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2381 				dm_pflip_high_irq, c_irq_params);
2383 	}
2384 
2385 	/* HPD */
2386 	r = amdgpu_irq_add_id(adev, client_id,
2387 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2388 	if (r) {
2389 		DRM_ERROR("Failed to add hpd irq id!\n");
2390 		return r;
2391 	}
2392 
2393 	register_hpd_handlers(adev);
2394 
2395 	return 0;
2396 }
2397 
2398 #if defined(CONFIG_DRM_AMD_DC_DCN)
2399 /* Register IRQ sources and initialize IRQ callbacks */
2400 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2401 {
2402 	struct dc *dc = adev->dm.dc;
2403 	struct common_irq_params *c_irq_params;
2404 	struct dc_interrupt_params int_params = {0};
2405 	int r;
2406 	int i;
2407 
2408 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2409 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2410 
2411 	/*
2412 	 * Actions of amdgpu_irq_add_id():
2413 	 * 1. Register a set() function with base driver.
2414 	 *    Base driver will call set() function to enable/disable an
2415 	 *    interrupt in DC hardware.
2416 	 * 2. Register amdgpu_dm_irq_handler().
2417 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2418 	 *    coming from DC hardware.
2419 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2420 	 *    for acknowledging and handling.
2421 	 */
2422 
2423 	/* Use VSTARTUP interrupt */
2424 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2425 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2426 			i++) {
2427 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2428 
2429 		if (r) {
2430 			DRM_ERROR("Failed to add crtc irq id!\n");
2431 			return r;
2432 		}
2433 
2434 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2435 		int_params.irq_source =
2436 			dc_interrupt_to_irq_source(dc, i, 0);
2437 
2438 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2439 
2440 		c_irq_params->adev = adev;
2441 		c_irq_params->irq_src = int_params.irq_source;
2442 
2443 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2444 				dm_dcn_crtc_high_irq, c_irq_params);
2445 	}
2446 
2447 	/* Use GRPH_PFLIP interrupt */
2448 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2449 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2450 			i++) {
2451 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2452 		if (r) {
2453 			DRM_ERROR("Failed to add page flip irq id!\n");
2454 			return r;
2455 		}
2456 
2457 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2458 		int_params.irq_source =
2459 			dc_interrupt_to_irq_source(dc, i, 0);
2460 
2461 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2462 
2463 		c_irq_params->adev = adev;
2464 		c_irq_params->irq_src = int_params.irq_source;
2465 
2466 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2467 				dm_pflip_high_irq, c_irq_params);
2469 	}
2470 
2471 	/* HPD */
2472 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2473 			&adev->hpd_irq);
2474 	if (r) {
2475 		DRM_ERROR("Failed to add hpd irq id!\n");
2476 		return r;
2477 	}
2478 
2479 	register_hpd_handlers(adev);
2480 
2481 	return 0;
2482 }
2483 #endif
2484 
2485 /*
2486  * Acquires the lock for the atomic state object and returns
2487  * the new atomic state.
2488  *
2489  * This should only be called during atomic check.
2490  */
2491 static int dm_atomic_get_state(struct drm_atomic_state *state,
2492 			       struct dm_atomic_state **dm_state)
2493 {
2494 	struct drm_device *dev = state->dev;
2495 	struct amdgpu_device *adev = dev->dev_private;
2496 	struct amdgpu_display_manager *dm = &adev->dm;
2497 	struct drm_private_state *priv_state;
2498 
2499 	if (*dm_state)
2500 		return 0;
2501 
2502 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2503 	if (IS_ERR(priv_state))
2504 		return PTR_ERR(priv_state);
2505 
2506 	*dm_state = to_dm_atomic_state(priv_state);
2507 
2508 	return 0;
2509 }
2510 
2511 struct dm_atomic_state *
2512 dm_atomic_get_new_state(struct drm_atomic_state *state)
2513 {
2514 	struct drm_device *dev = state->dev;
2515 	struct amdgpu_device *adev = dev->dev_private;
2516 	struct amdgpu_display_manager *dm = &adev->dm;
2517 	struct drm_private_obj *obj;
2518 	struct drm_private_state *new_obj_state;
2519 	int i;
2520 
2521 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2522 		if (obj->funcs == dm->atomic_obj.funcs)
2523 			return to_dm_atomic_state(new_obj_state);
2524 	}
2525 
2526 	return NULL;
2527 }
2528 
2529 struct dm_atomic_state *
2530 dm_atomic_get_old_state(struct drm_atomic_state *state)
2531 {
2532 	struct drm_device *dev = state->dev;
2533 	struct amdgpu_device *adev = dev->dev_private;
2534 	struct amdgpu_display_manager *dm = &adev->dm;
2535 	struct drm_private_obj *obj;
2536 	struct drm_private_state *old_obj_state;
2537 	int i;
2538 
2539 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2540 		if (obj->funcs == dm->atomic_obj.funcs)
2541 			return to_dm_atomic_state(old_obj_state);
2542 	}
2543 
2544 	return NULL;
2545 }
2546 
2547 static struct drm_private_state *
2548 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2549 {
2550 	struct dm_atomic_state *old_state, *new_state;
2551 
2552 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2553 	if (!new_state)
2554 		return NULL;
2555 
2556 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2557 
2558 	old_state = to_dm_atomic_state(obj->state);
2559 
2560 	if (old_state && old_state->context)
2561 		new_state->context = dc_copy_state(old_state->context);
2562 
2563 	if (!new_state->context) {
2564 		kfree(new_state);
2565 		return NULL;
2566 	}
2567 
2568 	return &new_state->base;
2569 }
2570 
2571 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2572 				    struct drm_private_state *state)
2573 {
2574 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2575 
2576 	if (dm_state && dm_state->context)
2577 		dc_release_state(dm_state->context);
2578 
2579 	kfree(dm_state);
2580 }
2581 
2582 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2583 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2584 	.atomic_destroy_state = dm_atomic_destroy_state,
2585 };
2586 
2587 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2588 {
2589 	struct dm_atomic_state *state;
2590 	int r;
2591 
2592 	adev->mode_info.mode_config_initialized = true;
2593 
2594 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2595 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2596 
2597 	adev->ddev->mode_config.max_width = 16384;
2598 	adev->ddev->mode_config.max_height = 16384;
2599 
2600 	adev->ddev->mode_config.preferred_depth = 24;
2601 	adev->ddev->mode_config.prefer_shadow = 1;
2602 	/* indicates support for immediate flip */
2603 	adev->ddev->mode_config.async_page_flip = true;
2604 
2605 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2606 
2607 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2608 	if (!state)
2609 		return -ENOMEM;
2610 
2611 	state->context = dc_create_state(adev->dm.dc);
2612 	if (!state->context) {
2613 		kfree(state);
2614 		return -ENOMEM;
2615 	}
2616 
2617 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2618 
2619 	drm_atomic_private_obj_init(adev->ddev,
2620 				    &adev->dm.atomic_obj,
2621 				    &state->base,
2622 				    &dm_atomic_state_funcs);
2623 
2624 	r = amdgpu_display_modeset_create_props(adev);
2625 	if (r)
2626 		return r;
2627 
2628 	r = amdgpu_dm_audio_init(adev);
2629 	if (r)
2630 		return r;
2631 
2632 	return 0;
2633 }
2634 
2635 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2636 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2637 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2638 
2639 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2640 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2641 
2642 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2643 {
2644 #if defined(CONFIG_ACPI)
2645 	struct amdgpu_dm_backlight_caps caps;
2646 
2647 	if (dm->backlight_caps.caps_valid)
2648 		return;
2649 
2650 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2651 	if (caps.caps_valid) {
2652 		dm->backlight_caps.caps_valid = true;
2653 		if (caps.aux_support)
2654 			return;
2655 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2656 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2657 	} else {
2658 		dm->backlight_caps.min_input_signal =
2659 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2660 		dm->backlight_caps.max_input_signal =
2661 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2662 	}
2663 #else
2664 	if (dm->backlight_caps.aux_support)
2665 		return;
2666 
2667 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2668 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2669 #endif
2670 }
2671 
2672 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2673 {
2674 	bool rc;
2675 
2676 	if (!link)
2677 		return 1;
2678 
2679 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2680 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2681 
2682 	return rc ? 0 : 1;
2683 }
2684 
2685 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2686 			      const uint32_t user_brightness)
2687 {
2688 	u32 min, max, conversion_pace;
2689 	u32 brightness = user_brightness;
2690 
2691 	if (!caps)
2692 		goto out;
2693 
2694 	if (!caps->aux_support) {
2695 		max = caps->max_input_signal;
2696 		min = caps->min_input_signal;
2697 		/*
2698 		 * The brightness input is in the range 0-255.
2699 		 * It needs to be rescaled to be between the
2700 		 * requested min and max input signal, and it
2701 		 * also needs to be scaled up by 0x101 to
2702 		 * match the DC interface, which has a range of
2703 		 * 0 to 0xffff.
2704 		 */
2705 		conversion_pace = 0x101;
2706 		brightness =
2707 			user_brightness
2708 			* conversion_pace
2709 			* (max - min)
2710 			/ AMDGPU_MAX_BL_LEVEL
2711 			+ min * conversion_pace;
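		/*
		 * Worked example (using the default caps): with min = 12,
		 * max = 255, and user_brightness = 255, this gives
		 * 255 * 0x101 * 243 / 255 + 12 * 0x101 = 0xffff, the top of
		 * the DC range.
		 */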
2712 	} else {
2713 		/* TODO
2714 		 * We are doing a linear interpolation here, which is OK but
2715 		 * does not provide the optimal result. We probably want
2716 		 * something close to the Perceptual Quantizer (PQ) curve.
2717 		 */
2718 		max = caps->aux_max_input_signal;
2719 		min = caps->aux_min_input_signal;
2720 
2721 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2722 			       + user_brightness * max;
2723 		// Multiply the value by 1000 since we use millinits
2724 		brightness *= 1000;
2725 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
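		/*
		 * Worked example (illustrative nit values): with
		 * aux_min_input_signal = 50 and aux_max_input_signal = 500,
		 * user_brightness = 255 gives 255 * 500 * 1000 / 255 = 500000
		 * millinits, i.e. the panel maximum.
		 */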
2726 	}
2727 
2728 out:
2729 	return brightness;
2730 }
2731 
2732 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2733 {
2734 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2735 	struct amdgpu_dm_backlight_caps caps;
2736 	struct dc_link *link = NULL;
2737 	u32 brightness;
2738 	bool rc;
2739 
2740 	amdgpu_dm_update_backlight_caps(dm);
2741 	caps = dm->backlight_caps;
2742 
2743 	link = (struct dc_link *)dm->backlight_link;
2744 
2745 	brightness = convert_brightness(&caps, bd->props.brightness);
2746 	// Change brightness based on AUX property
2747 	if (caps.aux_support)
2748 		return set_backlight_via_aux(link, brightness);
2749 
2750 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2751 
2752 	return rc ? 0 : 1;
2753 }
2754 
2755 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2756 {
2757 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2758 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2759 
2760 	if (ret == DC_ERROR_UNEXPECTED)
2761 		return bd->props.brightness;
2762 	return ret;
2763 }
2764 
2765 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2766 	.options = BL_CORE_SUSPENDRESUME,
2767 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2768 	.update_status	= amdgpu_dm_backlight_update_status,
2769 };
2770 
2771 static void
2772 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2773 {
2774 	char bl_name[16];
2775 	struct backlight_properties props = { 0 };
2776 
2777 	amdgpu_dm_update_backlight_caps(dm);
2778 
2779 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2780 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2781 	props.type = BACKLIGHT_RAW;
2782 
2783 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2784 			dm->adev->ddev->primary->index);
2785 
2786 	dm->backlight_dev = backlight_device_register(bl_name,
2787 			dm->adev->ddev->dev,
2788 			dm,
2789 			&amdgpu_dm_backlight_ops,
2790 			&props);
2791 
2792 	if (IS_ERR(dm->backlight_dev))
2793 		DRM_ERROR("DM: Backlight registration failed!\n");
2794 	else
2795 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2796 }
2797 
2798 #endif
2799 
2800 static int initialize_plane(struct amdgpu_display_manager *dm,
2801 			    struct amdgpu_mode_info *mode_info, int plane_id,
2802 			    enum drm_plane_type plane_type,
2803 			    const struct dc_plane_cap *plane_cap)
2804 {
2805 	struct drm_plane *plane;
2806 	unsigned long possible_crtcs;
2807 	int ret = 0;
2808 
2809 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2810 	if (!plane) {
2811 		DRM_ERROR("KMS: Failed to allocate plane\n");
2812 		return -ENOMEM;
2813 	}
2814 	plane->type = plane_type;
2815 
2816 	/*
2817 	 * HACK: IGT tests expect that the primary plane for a CRTC
2818 	 * can only have one possible CRTC. Only expose support for
2819 	 * any CRTC if the plane is not going to be used as a primary
2820 	 * plane for a CRTC - i.e. for overlay or underlay planes.
2821 	 */
2822 	possible_crtcs = 1 << plane_id;
2823 	if (plane_id >= dm->dc->caps.max_streams)
2824 		possible_crtcs = 0xff;
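	/*
	 * For example: plane_id 0 advertises only CRTC 0 (mask 0x1), while a
	 * plane past max_streams advertises mask 0xff, i.e. any of up to
	 * eight CRTCs.
	 */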
2825 
2826 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2827 
2828 	if (ret) {
2829 		DRM_ERROR("KMS: Failed to initialize plane\n");
2830 		kfree(plane);
2831 		return ret;
2832 	}
2833 
2834 	if (mode_info)
2835 		mode_info->planes[plane_id] = plane;
2836 
2837 	return ret;
2838 }
2839 
2840 
2841 static void register_backlight_device(struct amdgpu_display_manager *dm,
2842 				      struct dc_link *link)
2843 {
2844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2846 
2847 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2848 	    link->type != dc_connection_none) {
2849 		/*
2850 		 * Even if registration failed, we should continue with
2851 		 * DM initialization, because not having a backlight control
2852 		 * is better than a black screen.
2853 		 */
2854 		amdgpu_dm_register_backlight_device(dm);
2855 
2856 		if (dm->backlight_dev)
2857 			dm->backlight_link = link;
2858 	}
2859 #endif
2860 }
2861 
2862 
2863 /*
2864  * In this architecture, the association
2865  * connector -> encoder -> crtc
2866  * is not really required. The crtc and connector will hold the
2867  * display_index as an abstraction to use with the DAL component.
2868  *
2869  * Returns 0 on success
2870  */
2871 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2872 {
2873 	struct amdgpu_display_manager *dm = &adev->dm;
2874 	int32_t i;
2875 	struct amdgpu_dm_connector *aconnector = NULL;
2876 	struct amdgpu_encoder *aencoder = NULL;
2877 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2878 	uint32_t link_cnt;
2879 	int32_t primary_planes;
2880 	enum dc_connection_type new_connection_type = dc_connection_none;
2881 	const struct dc_plane_cap *plane;
2882 
2883 	link_cnt = dm->dc->caps.max_links;
2884 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2885 		DRM_ERROR("DM: Failed to initialize mode config\n");
2886 		return -EINVAL;
2887 	}
2888 
2889 	/* There is one primary plane per CRTC */
2890 	primary_planes = dm->dc->caps.max_streams;
2891 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2892 
2893 	/*
2894 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2895 	 * Order is reversed to match iteration order in atomic check.
2896 	 */
2897 	for (i = (primary_planes - 1); i >= 0; i--) {
2898 		plane = &dm->dc->caps.planes[i];
2899 
2900 		if (initialize_plane(dm, mode_info, i,
2901 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2902 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2903 			goto fail;
2904 		}
2905 	}
2906 
2907 	/*
2908 	 * Initialize overlay planes, index starting after primary planes.
2909 	 * These planes have a higher DRM index than the primary planes since
2910 	 * they should be considered as having a higher z-order.
2911 	 * Order is reversed to match iteration order in atomic check.
2912 	 *
2913 	 * Only support DCN for now, and only expose one so we don't encourage
2914 	 * userspace to use up all the pipes.
2915 	 */
2916 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2917 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2918 
2919 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2920 			continue;
2921 
2922 		if (!plane->blends_with_above || !plane->blends_with_below)
2923 			continue;
2924 
2925 		if (!plane->pixel_format_support.argb8888)
2926 			continue;
2927 
2928 		if (initialize_plane(dm, NULL, primary_planes + i,
2929 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2930 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2931 			goto fail;
2932 		}
2933 
2934 		/* Only create one overlay plane. */
2935 		break;
2936 	}
2937 
2938 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2939 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2940 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2941 			goto fail;
2942 		}
2943 
2944 	dm->display_indexes_num = dm->dc->caps.max_streams;
2945 
2946 	/* loops over all connectors on the board */
2947 	for (i = 0; i < link_cnt; i++) {
2948 		struct dc_link *link = NULL;
2949 
2950 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2951 			DRM_ERROR(
2952 				"KMS: Cannot support more than %d display indexes\n",
2953 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2954 			continue;
2955 		}
2956 
2957 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2958 		if (!aconnector)
2959 			goto fail;
2960 
2961 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2962 		if (!aencoder)
2963 			goto fail;
2964 
2965 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2966 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2967 			goto fail;
2968 		}
2969 
2970 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2971 			DRM_ERROR("KMS: Failed to initialize connector\n");
2972 			goto fail;
2973 		}
2974 
2975 		link = dc_get_link_at_index(dm->dc, i);
2976 
2977 		if (!dc_link_detect_sink(link, &new_connection_type))
2978 			DRM_ERROR("KMS: Failed to detect connector\n");
2979 
2980 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2981 			emulated_link_detect(link);
2982 			amdgpu_dm_update_connector_after_detect(aconnector);
2983 
2984 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2985 			amdgpu_dm_update_connector_after_detect(aconnector);
2986 			register_backlight_device(dm, link);
2987 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2988 				amdgpu_dm_set_psr_caps(link);
2989 		}
2992 	}
2993 
2994 	/* Software is initialized. Now we can register interrupt handlers. */
2995 	switch (adev->asic_type) {
2996 	case CHIP_BONAIRE:
2997 	case CHIP_HAWAII:
2998 	case CHIP_KAVERI:
2999 	case CHIP_KABINI:
3000 	case CHIP_MULLINS:
3001 	case CHIP_TONGA:
3002 	case CHIP_FIJI:
3003 	case CHIP_CARRIZO:
3004 	case CHIP_STONEY:
3005 	case CHIP_POLARIS11:
3006 	case CHIP_POLARIS10:
3007 	case CHIP_POLARIS12:
3008 	case CHIP_VEGAM:
3009 	case CHIP_VEGA10:
3010 	case CHIP_VEGA12:
3011 	case CHIP_VEGA20:
3012 		if (dce110_register_irq_handlers(dm->adev)) {
3013 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3014 			goto fail;
3015 		}
3016 		break;
3017 #if defined(CONFIG_DRM_AMD_DC_DCN)
3018 	case CHIP_RAVEN:
3019 	case CHIP_NAVI12:
3020 	case CHIP_NAVI10:
3021 	case CHIP_NAVI14:
3022 	case CHIP_RENOIR:
3023 		if (dcn10_register_irq_handlers(dm->adev)) {
3024 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3025 			goto fail;
3026 		}
3027 		break;
3028 #endif
3029 	default:
3030 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3031 		goto fail;
3032 	}
3033 
3034 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3035 		dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3036 
3037 	/* No userspace support. */
3038 	dm->dc->debug.disable_tri_buf = true;
3039 
3040 	return 0;
3041 fail:
3042 	kfree(aencoder);
3043 	kfree(aconnector);
3044 
3045 	return -EINVAL;
3046 }
3047 
3048 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3049 {
3050 	drm_mode_config_cleanup(dm->ddev);
3051 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3053 }
3054 
3055 /******************************************************************************
3056  * amdgpu_display_funcs functions
3057  *****************************************************************************/
3058 
3059 /*
3060  * dm_bandwidth_update - program display watermarks
3061  *
3062  * @adev: amdgpu_device pointer
3063  *
3064  * Calculate and program the display watermarks and line buffer allocation.
3065  */
3066 static void dm_bandwidth_update(struct amdgpu_device *adev)
3067 {
3068 	/* TODO: implement later */
3069 }
3070 
3071 static const struct amdgpu_display_funcs dm_display_funcs = {
3072 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3073 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3074 	.backlight_set_level = NULL, /* never called for DC */
3075 	.backlight_get_level = NULL, /* never called for DC */
3076 	.hpd_sense = NULL,/* called unconditionally */
3077 	.hpd_set_polarity = NULL, /* called unconditionally */
3078 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3079 	.page_flip_get_scanoutpos =
3080 		dm_crtc_get_scanoutpos,/* called unconditionally */
3081 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3082 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3083 };
3084 
3085 #if defined(CONFIG_DEBUG_KERNEL_DC)
3086 
3087 static ssize_t s3_debug_store(struct device *device,
3088 			      struct device_attribute *attr,
3089 			      const char *buf,
3090 			      size_t count)
3091 {
3092 	int ret;
3093 	int s3_state;
3094 	struct drm_device *drm_dev = dev_get_drvdata(device);
3095 	struct amdgpu_device *adev = drm_dev->dev_private;
3096 
3097 	ret = kstrtoint(buf, 0, &s3_state);
3098 
3099 	if (ret == 0) {
3100 		if (s3_state) {
3101 			dm_resume(adev);
3102 			drm_kms_helper_hotplug_event(adev->ddev);
3103 		} else {
3104 			dm_suspend(adev);
		}
3105 	}
3106 
3107 	return ret == 0 ? count : 0;
3108 }
3109 
3110 DEVICE_ATTR_WO(s3_debug);
3111 
3112 #endif
3113 
3114 static int dm_early_init(void *handle)
3115 {
3116 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3117 
3118 	switch (adev->asic_type) {
3119 	case CHIP_BONAIRE:
3120 	case CHIP_HAWAII:
3121 		adev->mode_info.num_crtc = 6;
3122 		adev->mode_info.num_hpd = 6;
3123 		adev->mode_info.num_dig = 6;
3124 		break;
3125 	case CHIP_KAVERI:
3126 		adev->mode_info.num_crtc = 4;
3127 		adev->mode_info.num_hpd = 6;
3128 		adev->mode_info.num_dig = 7;
3129 		break;
3130 	case CHIP_KABINI:
3131 	case CHIP_MULLINS:
3132 		adev->mode_info.num_crtc = 2;
3133 		adev->mode_info.num_hpd = 6;
3134 		adev->mode_info.num_dig = 6;
3135 		break;
3136 	case CHIP_FIJI:
3137 	case CHIP_TONGA:
3138 		adev->mode_info.num_crtc = 6;
3139 		adev->mode_info.num_hpd = 6;
3140 		adev->mode_info.num_dig = 7;
3141 		break;
3142 	case CHIP_CARRIZO:
3143 		adev->mode_info.num_crtc = 3;
3144 		adev->mode_info.num_hpd = 6;
3145 		adev->mode_info.num_dig = 9;
3146 		break;
3147 	case CHIP_STONEY:
3148 		adev->mode_info.num_crtc = 2;
3149 		adev->mode_info.num_hpd = 6;
3150 		adev->mode_info.num_dig = 9;
3151 		break;
3152 	case CHIP_POLARIS11:
3153 	case CHIP_POLARIS12:
3154 		adev->mode_info.num_crtc = 5;
3155 		adev->mode_info.num_hpd = 5;
3156 		adev->mode_info.num_dig = 5;
3157 		break;
3158 	case CHIP_POLARIS10:
3159 	case CHIP_VEGAM:
3160 		adev->mode_info.num_crtc = 6;
3161 		adev->mode_info.num_hpd = 6;
3162 		adev->mode_info.num_dig = 6;
3163 		break;
3164 	case CHIP_VEGA10:
3165 	case CHIP_VEGA12:
3166 	case CHIP_VEGA20:
3167 		adev->mode_info.num_crtc = 6;
3168 		adev->mode_info.num_hpd = 6;
3169 		adev->mode_info.num_dig = 6;
3170 		break;
3171 #if defined(CONFIG_DRM_AMD_DC_DCN)
3172 	case CHIP_RAVEN:
3173 		adev->mode_info.num_crtc = 4;
3174 		adev->mode_info.num_hpd = 4;
3175 		adev->mode_info.num_dig = 4;
3176 		break;
3177 #endif
3178 	case CHIP_NAVI10:
3179 	case CHIP_NAVI12:
3180 		adev->mode_info.num_crtc = 6;
3181 		adev->mode_info.num_hpd = 6;
3182 		adev->mode_info.num_dig = 6;
3183 		break;
3184 	case CHIP_NAVI14:
3185 		adev->mode_info.num_crtc = 5;
3186 		adev->mode_info.num_hpd = 5;
3187 		adev->mode_info.num_dig = 5;
3188 		break;
3189 	case CHIP_RENOIR:
3190 		adev->mode_info.num_crtc = 4;
3191 		adev->mode_info.num_hpd = 4;
3192 		adev->mode_info.num_dig = 4;
3193 		break;
3194 	default:
3195 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3196 		return -EINVAL;
3197 	}
3198 
3199 	amdgpu_dm_set_irq_funcs(adev);
3200 
3201 	if (adev->mode_info.funcs == NULL)
3202 		adev->mode_info.funcs = &dm_display_funcs;
3203 
3204 	/*
3205 	 * Note: Do NOT change adev->audio_endpt_rreg and
3206 	 * adev->audio_endpt_wreg because they are initialised in
3207 	 * amdgpu_device_init()
3208 	 */
3209 #if defined(CONFIG_DEBUG_KERNEL_DC)
3210 	device_create_file(
3211 		adev->ddev->dev,
3212 		&dev_attr_s3_debug);
3213 #endif
3214 
3215 	return 0;
3216 }
3217 
3218 static bool modeset_required(struct drm_crtc_state *crtc_state,
3219 			     struct dc_stream_state *new_stream,
3220 			     struct dc_stream_state *old_stream)
3221 {
3222 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3223 		return false;
3224 
3225 	if (!crtc_state->enable)
3226 		return false;
3227 
3228 	return crtc_state->active;
3229 }
3230 
3231 static bool modereset_required(struct drm_crtc_state *crtc_state)
3232 {
3233 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3234 		return false;
3235 
3236 	return !crtc_state->enable || !crtc_state->active;
3237 }
3238 
3239 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3240 {
3241 	drm_encoder_cleanup(encoder);
3242 	kfree(encoder);
3243 }
3244 
3245 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3246 	.destroy = amdgpu_dm_encoder_destroy,
3247 };
3248 
3249 
3250 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3251 				struct dc_scaling_info *scaling_info)
3252 {
3253 	int scale_w, scale_h;
3254 
3255 	memset(scaling_info, 0, sizeof(*scaling_info));
3256 
3257 	/* Source is in 16.16 fixed point; we ignore the fractional part for now... */
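	/*
	 * For example: a src_x of 0x00018000 (1.5 in 16.16) truncates to
	 * source pixel 1 after the shift below.
	 */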
3258 	scaling_info->src_rect.x = state->src_x >> 16;
3259 	scaling_info->src_rect.y = state->src_y >> 16;
3260 
3261 	scaling_info->src_rect.width = state->src_w >> 16;
3262 	if (scaling_info->src_rect.width == 0)
3263 		return -EINVAL;
3264 
3265 	scaling_info->src_rect.height = state->src_h >> 16;
3266 	if (scaling_info->src_rect.height == 0)
3267 		return -EINVAL;
3268 
3269 	scaling_info->dst_rect.x = state->crtc_x;
3270 	scaling_info->dst_rect.y = state->crtc_y;
3271 
3272 	if (state->crtc_w == 0)
3273 		return -EINVAL;
3274 
3275 	scaling_info->dst_rect.width = state->crtc_w;
3276 
3277 	if (state->crtc_h == 0)
3278 		return -EINVAL;
3279 
3280 	scaling_info->dst_rect.height = state->crtc_h;
3281 
3282 	/* DRM doesn't specify clipping on destination output. */
3283 	scaling_info->clip_rect = scaling_info->dst_rect;
3284 
3285 	/* TODO: Validate scaling per-format with DC plane caps */
3286 	scale_w = scaling_info->dst_rect.width * 1000 /
3287 		  scaling_info->src_rect.width;
3288 
3289 	if (scale_w < 250 || scale_w > 16000)
3290 		return -EINVAL;
3291 
3292 	scale_h = scaling_info->dst_rect.height * 1000 /
3293 		  scaling_info->src_rect.height;
3294 
3295 	if (scale_h < 250 || scale_h > 16000)
3296 		return -EINVAL;
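	/*
	 * Derived from the limits above: a 1920-wide source shrunk to a
	 * 480-wide destination gives scale_w = 480 * 1000 / 1920 = 250, the
	 * 4x downscale limit; 16000 corresponds to a 16x upscale.
	 */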
3297 
3298 	/*
3299 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3300 	 * assume reasonable defaults based on the format.
3301 	 */
3302 
3303 	return 0;
3304 }
3305 
3306 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3307 		       uint64_t *tiling_flags)
3308 {
3309 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3310 	int r = amdgpu_bo_reserve(rbo, false);
3311 
3312 	if (unlikely(r)) {
3313 		/* Don't show error message when returning -ERESTARTSYS */
3314 		if (r != -ERESTARTSYS)
3315 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3316 		return r;
3317 	}
3318 
3319 	if (tiling_flags)
3320 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3321 
3322 	amdgpu_bo_unreserve(rbo);
3323 
3324 	return r;
3325 }
3326 
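/*
 * The tiling flags encode the DCC metadata offset in units of 256 bytes;
 * e.g. an encoded offset of 4 places the metadata at address + 1024.
 * A zero offset means the surface has no DCC metadata.
 */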
3327 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3328 {
3329 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3330 
3331 	return offset ? (address + offset * 256) : 0;
3332 }
3333 
3334 static int
3335 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3336 			  const struct amdgpu_framebuffer *afb,
3337 			  const enum surface_pixel_format format,
3338 			  const enum dc_rotation_angle rotation,
3339 			  const struct plane_size *plane_size,
3340 			  const union dc_tiling_info *tiling_info,
3341 			  const uint64_t info,
3342 			  struct dc_plane_dcc_param *dcc,
3343 			  struct dc_plane_address *address,
3344 			  bool force_disable_dcc)
3345 {
3346 	struct dc *dc = adev->dm.dc;
3347 	struct dc_dcc_surface_param input;
3348 	struct dc_surface_dcc_cap output;
3349 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3350 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3351 	uint64_t dcc_address;
3352 
3353 	memset(&input, 0, sizeof(input));
3354 	memset(&output, 0, sizeof(output));
3355 
3356 	if (force_disable_dcc)
3357 		return 0;
3358 
3359 	if (!offset)
3360 		return 0;
3361 
3362 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3363 		return 0;
3364 
3365 	if (!dc->cap_funcs.get_dcc_compression_cap)
3366 		return -EINVAL;
3367 
3368 	input.format = format;
3369 	input.surface_size.width = plane_size->surface_size.width;
3370 	input.surface_size.height = plane_size->surface_size.height;
3371 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3372 
3373 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3374 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3375 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3376 		input.scan = SCAN_DIRECTION_VERTICAL;
3377 
3378 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3379 		return -EINVAL;
3380 
3381 	if (!output.capable)
3382 		return -EINVAL;
3383 
3384 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3385 		return -EINVAL;
3386 
3387 	dcc->enable = 1;
3388 	dcc->meta_pitch =
3389 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3390 	dcc->independent_64b_blks = i64b;
3391 
3392 	dcc_address = get_dcc_address(afb->address, info);
3393 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3394 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3395 
3396 	return 0;
3397 }
3398 
3399 static int
3400 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3401 			     const struct amdgpu_framebuffer *afb,
3402 			     const enum surface_pixel_format format,
3403 			     const enum dc_rotation_angle rotation,
3404 			     const uint64_t tiling_flags,
3405 			     union dc_tiling_info *tiling_info,
3406 			     struct plane_size *plane_size,
3407 			     struct dc_plane_dcc_param *dcc,
3408 			     struct dc_plane_address *address,
3409 			     bool force_disable_dcc)
3410 {
3411 	const struct drm_framebuffer *fb = &afb->base;
3412 	int ret;
3413 
3414 	memset(tiling_info, 0, sizeof(*tiling_info));
3415 	memset(plane_size, 0, sizeof(*plane_size));
3416 	memset(dcc, 0, sizeof(*dcc));
3417 	memset(address, 0, sizeof(*address));
3418 
3419 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3420 		plane_size->surface_size.x = 0;
3421 		plane_size->surface_size.y = 0;
3422 		plane_size->surface_size.width = fb->width;
3423 		plane_size->surface_size.height = fb->height;
3424 		plane_size->surface_pitch =
3425 			fb->pitches[0] / fb->format->cpp[0];
3426 
3427 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3428 		address->grph.addr.low_part = lower_32_bits(afb->address);
3429 		address->grph.addr.high_part = upper_32_bits(afb->address);
3430 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3431 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3432 
3433 		plane_size->surface_size.x = 0;
3434 		plane_size->surface_size.y = 0;
3435 		plane_size->surface_size.width = fb->width;
3436 		plane_size->surface_size.height = fb->height;
3437 		plane_size->surface_pitch =
3438 			fb->pitches[0] / fb->format->cpp[0];
3439 
3440 		plane_size->chroma_size.x = 0;
3441 		plane_size->chroma_size.y = 0;
3442 		/* TODO: set these based on surface format */
3443 		plane_size->chroma_size.width = fb->width / 2;
3444 		plane_size->chroma_size.height = fb->height / 2;
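		/*
		 * Halving both dimensions assumes 4:2:0 chroma subsampling
		 * (e.g. NV12); see the TODO above.
		 */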
3445 
3446 		plane_size->chroma_pitch =
3447 			fb->pitches[1] / fb->format->cpp[1];
3448 
3449 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3450 		address->video_progressive.luma_addr.low_part =
3451 			lower_32_bits(afb->address);
3452 		address->video_progressive.luma_addr.high_part =
3453 			upper_32_bits(afb->address);
3454 		address->video_progressive.chroma_addr.low_part =
3455 			lower_32_bits(chroma_addr);
3456 		address->video_progressive.chroma_addr.high_part =
3457 			upper_32_bits(chroma_addr);
3458 	}
3459 
3460 	/* Fill GFX8 params */
3461 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3462 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3463 
3464 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3465 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3466 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3467 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3468 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3469 
3470 		/* XXX fix me for VI */
3471 		tiling_info->gfx8.num_banks = num_banks;
3472 		tiling_info->gfx8.array_mode =
3473 				DC_ARRAY_2D_TILED_THIN1;
3474 		tiling_info->gfx8.tile_split = tile_split;
3475 		tiling_info->gfx8.bank_width = bankw;
3476 		tiling_info->gfx8.bank_height = bankh;
3477 		tiling_info->gfx8.tile_aspect = mtaspect;
3478 		tiling_info->gfx8.tile_mode =
3479 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3480 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3481 			== DC_ARRAY_1D_TILED_THIN1) {
3482 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3483 	}
3484 
3485 	tiling_info->gfx8.pipe_config =
3486 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3487 
3488 	if (adev->asic_type == CHIP_VEGA10 ||
3489 	    adev->asic_type == CHIP_VEGA12 ||
3490 	    adev->asic_type == CHIP_VEGA20 ||
3491 	    adev->asic_type == CHIP_NAVI10 ||
3492 	    adev->asic_type == CHIP_NAVI14 ||
3493 	    adev->asic_type == CHIP_NAVI12 ||
3494 	    adev->asic_type == CHIP_RENOIR ||
3495 	    adev->asic_type == CHIP_RAVEN) {
3496 		/* Fill GFX9 params */
3497 		tiling_info->gfx9.num_pipes =
3498 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3499 		tiling_info->gfx9.num_banks =
3500 			adev->gfx.config.gb_addr_config_fields.num_banks;
3501 		tiling_info->gfx9.pipe_interleave =
3502 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3503 		tiling_info->gfx9.num_shader_engines =
3504 			adev->gfx.config.gb_addr_config_fields.num_se;
3505 		tiling_info->gfx9.max_compressed_frags =
3506 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3507 		tiling_info->gfx9.num_rb_per_se =
3508 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3509 		tiling_info->gfx9.swizzle =
3510 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3511 		tiling_info->gfx9.shaderEnable = 1;
3512 
3513 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3514 						plane_size, tiling_info,
3515 						tiling_flags, dcc, address,
3516 						force_disable_dcc);
3517 		if (ret)
3518 			return ret;
3519 	}
3520 
3521 	return 0;
3522 }
3523 
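/*
 * Derive DC blending attributes from a DRM plane state. Per-pixel alpha is
 * only honored for overlay planes using premultiplied blending with an
 * alpha-capable format; the 16-bit DRM plane alpha is scaled down to the
 * 8-bit global alpha value DC expects.
 */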
3524 static void
3525 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3526 			       bool *per_pixel_alpha, bool *global_alpha,
3527 			       int *global_alpha_value)
3528 {
3529 	*per_pixel_alpha = false;
3530 	*global_alpha = false;
3531 	*global_alpha_value = 0xff;
3532 
3533 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3534 		return;
3535 
3536 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3537 		static const uint32_t alpha_formats[] = {
3538 			DRM_FORMAT_ARGB8888,
3539 			DRM_FORMAT_RGBA8888,
3540 			DRM_FORMAT_ABGR8888,
3541 		};
3542 		uint32_t format = plane_state->fb->format->format;
3543 		unsigned int i;
3544 
3545 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3546 			if (format == alpha_formats[i]) {
3547 				*per_pixel_alpha = true;
3548 				break;
3549 			}
3550 		}
3551 	}
3552 
3553 	if (plane_state->alpha < 0xffff) {
3554 		*global_alpha = true;
3555 		*global_alpha_value = plane_state->alpha >> 8;
3556 	}
3557 }
3558 
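/*
 * Map the DRM color encoding/range properties to a DC color space. RGB
 * formats always use sRGB; YCbCr formats pick the BT.601/709/2020 variant
 * matching the requested encoding and range, with limited-range BT.2020
 * rejected as unsupported.
 */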
3559 static int
3560 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3561 			    const enum surface_pixel_format format,
3562 			    enum dc_color_space *color_space)
3563 {
3564 	bool full_range;
3565 
3566 	*color_space = COLOR_SPACE_SRGB;
3567 
3568 	/* DRM color properties only affect non-RGB formats. */
3569 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3570 		return 0;
3571 
3572 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3573 
3574 	switch (plane_state->color_encoding) {
3575 	case DRM_COLOR_YCBCR_BT601:
3576 		if (full_range)
3577 			*color_space = COLOR_SPACE_YCBCR601;
3578 		else
3579 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3580 		break;
3581 
3582 	case DRM_COLOR_YCBCR_BT709:
3583 		if (full_range)
3584 			*color_space = COLOR_SPACE_YCBCR709;
3585 		else
3586 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3587 		break;
3588 
3589 	case DRM_COLOR_YCBCR_BT2020:
3590 		if (full_range)
3591 			*color_space = COLOR_SPACE_2020_YCBCR;
3592 		else
3593 			return -EINVAL;
3594 		break;
3595 
3596 	default:
3597 		return -EINVAL;
3598 	}
3599 
3600 	return 0;
3601 }
3602 
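/*
 * Translate a DRM plane state into a dc_plane_info and surface address:
 * pixel format, rotation, tiling, plane size, DCC and blending attributes.
 * Returns -EINVAL for framebuffer formats DC cannot scan out.
 */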
3603 static int
3604 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3605 			    const struct drm_plane_state *plane_state,
3606 			    const uint64_t tiling_flags,
3607 			    struct dc_plane_info *plane_info,
3608 			    struct dc_plane_address *address,
3609 			    bool force_disable_dcc)
3610 {
3611 	const struct drm_framebuffer *fb = plane_state->fb;
3612 	const struct amdgpu_framebuffer *afb =
3613 		to_amdgpu_framebuffer(plane_state->fb);
3614 	struct drm_format_name_buf format_name;
3615 	int ret;
3616 
3617 	memset(plane_info, 0, sizeof(*plane_info));
3618 
3619 	switch (fb->format->format) {
3620 	case DRM_FORMAT_C8:
3621 		plane_info->format =
3622 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3623 		break;
3624 	case DRM_FORMAT_RGB565:
3625 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3626 		break;
3627 	case DRM_FORMAT_XRGB8888:
3628 	case DRM_FORMAT_ARGB8888:
3629 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3630 		break;
3631 	case DRM_FORMAT_XRGB2101010:
3632 	case DRM_FORMAT_ARGB2101010:
3633 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3634 		break;
3635 	case DRM_FORMAT_XBGR2101010:
3636 	case DRM_FORMAT_ABGR2101010:
3637 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3638 		break;
3639 	case DRM_FORMAT_XBGR8888:
3640 	case DRM_FORMAT_ABGR8888:
3641 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3642 		break;
3643 	case DRM_FORMAT_NV21:
3644 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3645 		break;
3646 	case DRM_FORMAT_NV12:
3647 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3648 		break;
3649 	case DRM_FORMAT_P010:
3650 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3651 		break;
3652 	default:
3653 		DRM_ERROR(
3654 			"Unsupported screen format %s\n",
3655 			drm_get_format_name(fb->format->format, &format_name));
3656 		return -EINVAL;
3657 	}
3658 
3659 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3660 	case DRM_MODE_ROTATE_0:
3661 		plane_info->rotation = ROTATION_ANGLE_0;
3662 		break;
3663 	case DRM_MODE_ROTATE_90:
3664 		plane_info->rotation = ROTATION_ANGLE_90;
3665 		break;
3666 	case DRM_MODE_ROTATE_180:
3667 		plane_info->rotation = ROTATION_ANGLE_180;
3668 		break;
3669 	case DRM_MODE_ROTATE_270:
3670 		plane_info->rotation = ROTATION_ANGLE_270;
3671 		break;
3672 	default:
3673 		plane_info->rotation = ROTATION_ANGLE_0;
3674 		break;
3675 	}
3676 
3677 	plane_info->visible = true;
3678 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3679 
3680 	plane_info->layer_index = 0;
3681 
3682 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3683 					  &plane_info->color_space);
3684 	if (ret)
3685 		return ret;
3686 
3687 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3688 					   plane_info->rotation, tiling_flags,
3689 					   &plane_info->tiling_info,
3690 					   &plane_info->plane_size,
3691 					   &plane_info->dcc, address,
3692 					   force_disable_dcc);
3693 	if (ret)
3694 		return ret;
3695 
3696 	fill_blending_from_plane_state(
3697 		plane_state, &plane_info->per_pixel_alpha,
3698 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3699 
3700 	return 0;
3701 }
3702 
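/*
 * Fill a dc_plane_state from the DRM plane and CRTC state: scaling
 * rectangles, buffer/tiling attributes, color space, blending, and the
 * input transfer function for color management. DCC is force-disabled on
 * Raven while the device is suspending.
 */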
3703 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3704 				    struct dc_plane_state *dc_plane_state,
3705 				    struct drm_plane_state *plane_state,
3706 				    struct drm_crtc_state *crtc_state)
3707 {
3708 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3709 	const struct amdgpu_framebuffer *amdgpu_fb =
3710 		to_amdgpu_framebuffer(plane_state->fb);
3711 	struct dc_scaling_info scaling_info;
3712 	struct dc_plane_info plane_info;
3713 	uint64_t tiling_flags;
3714 	int ret;
3715 	bool force_disable_dcc = false;
3716 
3717 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3718 	if (ret)
3719 		return ret;
3720 
3721 	dc_plane_state->src_rect = scaling_info.src_rect;
3722 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3723 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3724 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3725 
3726 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3727 	if (ret)
3728 		return ret;
3729 
3730 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3731 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3732 					  &plane_info,
3733 					  &dc_plane_state->address,
3734 					  force_disable_dcc);
3735 	if (ret)
3736 		return ret;
3737 
3738 	dc_plane_state->format = plane_info.format;
3739 	dc_plane_state->color_space = plane_info.color_space;

3741 	dc_plane_state->plane_size = plane_info.plane_size;
3742 	dc_plane_state->rotation = plane_info.rotation;
3743 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3744 	dc_plane_state->stereo_format = plane_info.stereo_format;
3745 	dc_plane_state->tiling_info = plane_info.tiling_info;
3746 	dc_plane_state->visible = plane_info.visible;
3747 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3748 	dc_plane_state->global_alpha = plane_info.global_alpha;
3749 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3750 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3752 
3753 	/*
3754 	 * Always set input transfer function, since plane state is refreshed
3755 	 * every time.
3756 	 */
3757 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3758 	if (ret)
3759 		return ret;
3760 
3761 	return 0;
3762 }
3763 
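/*
 * Compute the stream's src (viewport) and dst (addressable area) rectangles
 * from the requested mode and the connector's scaling (RMX_*) and underscan
 * properties. Defaults to full-screen scaling when no dm_state is given.
 */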
3764 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3765 					   const struct dm_connector_state *dm_state,
3766 					   struct dc_stream_state *stream)
3767 {
3768 	enum amdgpu_rmx_type rmx_type;
3769 
	struct rect src = { 0 }; /* viewport in composition space */
3771 	struct rect dst = { 0 }; /* stream addressable area */
3772 
3773 	/* no mode. nothing to be done */
3774 	if (!mode)
3775 		return;
3776 
3777 	/* Full screen scaling by default */
3778 	src.width = mode->hdisplay;
3779 	src.height = mode->vdisplay;
3780 	dst.width = stream->timing.h_addressable;
3781 	dst.height = stream->timing.v_addressable;
3782 
3783 	if (dm_state) {
3784 		rmx_type = dm_state->scaling;
3785 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3786 			if (src.width * dst.height <
3787 					src.height * dst.width) {
3788 				/* height needs less upscaling/more downscaling */
3789 				dst.width = src.width *
3790 						dst.height / src.height;
3791 			} else {
3792 				/* width needs less upscaling/more downscaling */
3793 				dst.height = src.height *
3794 						dst.width / src.width;
3795 			}
3796 		} else if (rmx_type == RMX_CENTER) {
3797 			dst = src;
3798 		}
3799 
3800 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3801 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3802 
3803 		if (dm_state->underscan_enable) {
3804 			dst.x += dm_state->underscan_hborder / 2;
3805 			dst.y += dm_state->underscan_vborder / 2;
3806 			dst.width -= dm_state->underscan_hborder;
3807 			dst.height -= dm_state->underscan_vborder;
3808 		}
3809 	}
3810 
3811 	stream->src = src;
3812 	stream->dst = dst;
3813 
	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			 dst.x, dst.y, dst.width, dst.height);
}
3818 
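/*
 * Pick an output color depth from the sink's EDID capabilities (using the
 * HDMI HF-VSDB deep-color bits for YCbCr 4:2:0), capped by the connector's
 * max_requested_bpc property and rounded down to an even bpc.
 */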
3819 static enum dc_color_depth
3820 convert_color_depth_from_display_info(const struct drm_connector *connector,
3821 				      const struct drm_connector_state *state,
3822 				      bool is_y420)
3823 {
3824 	uint8_t bpc;
3825 
3826 	if (is_y420) {
3827 		bpc = 8;
3828 
3829 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3830 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3831 			bpc = 16;
3832 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3833 			bpc = 12;
3834 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3835 			bpc = 10;
3836 	} else {
3837 		bpc = (uint8_t)connector->display_info.bpc;
3838 		/* Assume 8 bpc by default if no bpc is specified. */
3839 		bpc = bpc ? bpc : 8;
3840 	}
3841 
3842 	if (!state)
3843 		state = connector->state;
3844 
3845 	if (state) {
3846 		/*
3847 		 * Cap display bpc based on the user requested value.
3848 		 *
		 * The value for state->max_bpc may not be correctly updated
3850 		 * depending on when the connector gets added to the state
3851 		 * or if this was called outside of atomic check, so it
3852 		 * can't be used directly.
3853 		 */
3854 		bpc = min(bpc, state->max_requested_bpc);
3855 
3856 		/* Round down to the nearest even number. */
3857 		bpc = bpc - (bpc & 1);
3858 	}
3859 
3860 	switch (bpc) {
3861 	case 0:
3862 		/*
3863 		 * Temporary Work around, DRM doesn't parse color depth for
3864 		 * EDID revision before 1.4
3865 		 * TODO: Fix edid parsing
3866 		 */
3867 		return COLOR_DEPTH_888;
3868 	case 6:
3869 		return COLOR_DEPTH_666;
3870 	case 8:
3871 		return COLOR_DEPTH_888;
3872 	case 10:
3873 		return COLOR_DEPTH_101010;
3874 	case 12:
3875 		return COLOR_DEPTH_121212;
3876 	case 14:
3877 		return COLOR_DEPTH_141414;
3878 	case 16:
3879 		return COLOR_DEPTH_161616;
3880 	default:
3881 		return COLOR_DEPTH_UNDEFINED;
3882 	}
3883 }
3884 
3885 static enum dc_aspect_ratio
3886 get_aspect_ratio(const struct drm_display_mode *mode_in)
3887 {
3888 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3889 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3890 }
3891 
3892 static enum dc_color_space
3893 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3894 {
3895 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3896 
3897 	switch (dc_crtc_timing->pixel_encoding)	{
3898 	case PIXEL_ENCODING_YCBCR422:
3899 	case PIXEL_ENCODING_YCBCR444:
3900 	case PIXEL_ENCODING_YCBCR420:
3901 	{
3902 		/*
3903 		 * 27030khz is the separation point between HDTV and SDTV
3904 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
3905 		 * respectively
3906 		 */
3907 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3908 			if (dc_crtc_timing->flags.Y_ONLY)
3909 				color_space =
3910 					COLOR_SPACE_YCBCR709_LIMITED;
3911 			else
3912 				color_space = COLOR_SPACE_YCBCR709;
3913 		} else {
3914 			if (dc_crtc_timing->flags.Y_ONLY)
3915 				color_space =
3916 					COLOR_SPACE_YCBCR601_LIMITED;
3917 			else
3918 				color_space = COLOR_SPACE_YCBCR601;
3919 		}
3920 
3921 	}
3922 	break;
3923 	case PIXEL_ENCODING_RGB:
3924 		color_space = COLOR_SPACE_SRGB;
3925 		break;
3926 
3927 	default:
3928 		WARN_ON(1);
3929 		break;
3930 	}
3931 
3932 	return color_space;
3933 }
3934 
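/*
 * Walk the color depth down from the requested value until the pixel clock,
 * normalized per the HDMI spec for that depth (and halved for YCbCr 4:2:0),
 * fits within the sink's max TMDS clock. Returns false if no valid HDMI
 * depth fits.
 */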
3935 static bool adjust_colour_depth_from_display_info(
3936 	struct dc_crtc_timing *timing_out,
3937 	const struct drm_display_info *info)
3938 {
3939 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
3942 		normalized_clk = timing_out->pix_clk_100hz / 10;
3943 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3944 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3945 			normalized_clk /= 2;
3946 		/* Adjusting pix clock following on HDMI spec based on colour depth */
3947 		switch (depth) {
3948 		case COLOR_DEPTH_888:
3949 			break;
3950 		case COLOR_DEPTH_101010:
3951 			normalized_clk = (normalized_clk * 30) / 24;
3952 			break;
3953 		case COLOR_DEPTH_121212:
3954 			normalized_clk = (normalized_clk * 36) / 24;
3955 			break;
3956 		case COLOR_DEPTH_161616:
3957 			normalized_clk = (normalized_clk * 48) / 24;
3958 			break;
3959 		default:
3960 			/* The above depths are the only ones valid for HDMI. */
3961 			return false;
3962 		}
3963 		if (normalized_clk <= info->max_tmds_clock) {
3964 			timing_out->display_color_depth = depth;
3965 			return true;
3966 		}
3967 	} while (--depth > COLOR_DEPTH_666);
3968 	return false;
3969 }
3970 
3971 static void fill_stream_properties_from_drm_display_mode(
3972 	struct dc_stream_state *stream,
3973 	const struct drm_display_mode *mode_in,
3974 	const struct drm_connector *connector,
3975 	const struct drm_connector_state *connector_state,
3976 	const struct dc_stream_state *old_stream)
3977 {
3978 	struct dc_crtc_timing *timing_out = &stream->timing;
3979 	const struct drm_display_info *info = &connector->display_info;
3980 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3981 	struct hdmi_vendor_infoframe hv_frame;
3982 	struct hdmi_avi_infoframe avi_frame;
3983 
3984 	memset(&hv_frame, 0, sizeof(hv_frame));
3985 	memset(&avi_frame, 0, sizeof(avi_frame));
3986 
3987 	timing_out->h_border_left = 0;
3988 	timing_out->h_border_right = 0;
3989 	timing_out->v_border_top = 0;
3990 	timing_out->v_border_bottom = 0;
3991 	/* TODO: un-hardcode */
3992 	if (drm_mode_is_420_only(info, mode_in)
3993 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3994 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3995 	else if (drm_mode_is_420_also(info, mode_in)
3996 			&& aconnector->force_yuv420_output)
3997 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3998 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3999 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4000 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4001 	else
4002 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4003 
4004 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4005 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4006 		connector, connector_state,
4007 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4008 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4009 	timing_out->hdmi_vic = 0;
4010 
	if (old_stream) {
4012 		timing_out->vic = old_stream->timing.vic;
4013 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4014 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4015 	} else {
4016 		timing_out->vic = drm_match_cea_mode(mode_in);
4017 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4018 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4019 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4020 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4021 	}
4022 
4023 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4024 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4025 		timing_out->vic = avi_frame.video_code;
4026 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4027 		timing_out->hdmi_vic = hv_frame.vic;
4028 	}
4029 
4030 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4031 	timing_out->h_total = mode_in->crtc_htotal;
4032 	timing_out->h_sync_width =
4033 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4034 	timing_out->h_front_porch =
4035 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4036 	timing_out->v_total = mode_in->crtc_vtotal;
4037 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4038 	timing_out->v_front_porch =
4039 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4040 	timing_out->v_sync_width =
4041 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4042 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4043 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4044 
4045 	stream->output_color_space = get_output_color_space(timing_out);
4046 
4047 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4048 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4049 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4050 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4051 		    drm_mode_is_420_also(info, mode_in) &&
4052 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4053 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4054 			adjust_colour_depth_from_display_info(timing_out, info);
4055 		}
4056 	}
4057 }
4058 
4059 static void fill_audio_info(struct audio_info *audio_info,
4060 			    const struct drm_connector *drm_connector,
4061 			    const struct dc_sink *dc_sink)
4062 {
4063 	int i = 0;
4064 	int cea_revision = 0;
4065 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4066 
4067 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4068 	audio_info->product_id = edid_caps->product_id;
4069 
4070 	cea_revision = drm_connector->display_info.cea_rev;
4071 
4072 	strscpy(audio_info->display_name,
4073 		edid_caps->display_name,
4074 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4075 
4076 	if (cea_revision >= 3) {
4077 		audio_info->mode_count = edid_caps->audio_mode_count;
4078 
4079 		for (i = 0; i < audio_info->mode_count; ++i) {
4080 			audio_info->modes[i].format_code =
4081 					(enum audio_format_code)
4082 					(edid_caps->audio_modes[i].format_code);
4083 			audio_info->modes[i].channel_count =
4084 					edid_caps->audio_modes[i].channel_count;
4085 			audio_info->modes[i].sample_rates.all =
4086 					edid_caps->audio_modes[i].sample_rate;
4087 			audio_info->modes[i].sample_size =
4088 					edid_caps->audio_modes[i].sample_size;
4089 		}
4090 	}
4091 
4092 	audio_info->flags.all = edid_caps->speaker_flags;
4093 
4094 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4095 	if (drm_connector->latency_present[0]) {
4096 		audio_info->video_latency = drm_connector->video_latency[0];
4097 		audio_info->audio_latency = drm_connector->audio_latency[0];
4098 	}
4099 
4100 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4101 
4102 }
4103 
4104 static void
4105 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4106 				      struct drm_display_mode *dst_mode)
4107 {
4108 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4109 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4110 	dst_mode->crtc_clock = src_mode->crtc_clock;
4111 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4112 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4113 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4114 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4115 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4116 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4117 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4118 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4119 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4120 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4121 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4122 }
4123 
4124 static void
4125 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4126 					const struct drm_display_mode *native_mode,
4127 					bool scale_enabled)
4128 {
4129 	if (scale_enabled) {
4130 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4131 	} else if (native_mode->clock == drm_mode->clock &&
4132 			native_mode->htotal == drm_mode->htotal &&
4133 			native_mode->vtotal == drm_mode->vtotal) {
4134 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4135 	} else {
4136 		/* no scaling nor amdgpu inserted, no need to patch */
4137 	}
4138 }
4139 
4140 static struct dc_sink *
4141 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4142 {
4143 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
4146 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4147 
4148 	sink = dc_sink_create(&sink_init_data);
4149 	if (!sink) {
4150 		DRM_ERROR("Failed to create sink!\n");
4151 		return NULL;
4152 	}
4153 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4154 
4155 	return sink;
4156 }
4157 
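/*
 * Arm the CRTC-reset trigger on streams participating in multi-display
 * synchronization: reset on the VSYNC rising edge, delayed to the next line.
 */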
4158 static void set_multisync_trigger_params(
4159 		struct dc_stream_state *stream)
4160 {
4161 	if (stream->triggered_crtc_reset.enabled) {
4162 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4163 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4164 	}
4165 }
4166 
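/*
 * Choose the sync-enabled stream with the highest refresh rate as the
 * synchronization master and point every stream's CRTC-reset event source
 * at it.
 */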
4167 static void set_master_stream(struct dc_stream_state *stream_set[],
4168 			      int stream_count)
4169 {
4170 	int j, highest_rfr = 0, master_stream = 0;
4171 
4172 	for (j = 0;  j < stream_count; j++) {
4173 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4174 			int refresh_rate = 0;
4175 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
4178 			if (refresh_rate > highest_rfr) {
4179 				highest_rfr = refresh_rate;
4180 				master_stream = j;
4181 			}
4182 		}
4183 	}
4184 	for (j = 0;  j < stream_count; j++) {
4185 		if (stream_set[j])
4186 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4187 	}
4188 }
4189 
4190 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4191 {
4192 	int i = 0;
4193 
4194 	if (context->stream_count < 2)
4195 		return;
4196 	for (i = 0; i < context->stream_count ; i++) {
4197 		if (!context->streams[i])
4198 			continue;
4199 		/*
4200 		 * TODO: add a function to read AMD VSDB bits and set
4201 		 * crtc_sync_master.multi_sync_enabled flag
4202 		 * For now it's set to false
4203 		 */
4204 		set_multisync_trigger_params(context->streams[i]);
4205 	}
4206 	set_master_stream(context->streams, context->stream_count);
4207 }
4208 
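/*
 * Build a dc_stream_state for the connector's sink, falling back to a fake
 * virtual sink when none is attached (e.g. forced-on headless connectors).
 * Fills the stream timing from the requested mode, enables DSC when a DP
 * sink supports it and the config fits the link bandwidth, and sets up
 * scaling, audio and PSR/VSC info packets.
 */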
4209 static struct dc_stream_state *
4210 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4211 		       const struct drm_display_mode *drm_mode,
4212 		       const struct dm_connector_state *dm_state,
4213 		       const struct dc_stream_state *old_stream)
4214 {
4215 	struct drm_display_mode *preferred_mode = NULL;
4216 	struct drm_connector *drm_connector;
4217 	const struct drm_connector_state *con_state =
4218 		dm_state ? &dm_state->base : NULL;
4219 	struct dc_stream_state *stream = NULL;
4220 	struct drm_display_mode mode = *drm_mode;
4221 	bool native_mode_found = false;
4222 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4223 	int mode_refresh;
4224 	int preferred_refresh = 0;
4225 #if defined(CONFIG_DRM_AMD_DC_DCN)
4226 	struct dsc_dec_dpcd_caps dsc_caps;
4227 #endif
4228 	uint32_t link_bandwidth_kbps;
4229 
	struct dc_sink *sink = NULL;

	if (!aconnector) {
4232 		DRM_ERROR("aconnector is NULL!\n");
4233 		return stream;
4234 	}
4235 
4236 	drm_connector = &aconnector->base;
4237 
4238 	if (!aconnector->dc_sink) {
4239 		sink = create_fake_sink(aconnector);
4240 		if (!sink)
4241 			return stream;
4242 	} else {
4243 		sink = aconnector->dc_sink;
4244 		dc_sink_retain(sink);
4245 	}
4246 
4247 	stream = dc_create_stream_for_sink(sink);
4248 
4249 	if (stream == NULL) {
4250 		DRM_ERROR("Failed to create stream for sink!\n");
4251 		goto finish;
4252 	}
4253 
4254 	stream->dm_stream_context = aconnector;
4255 
4256 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4257 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4258 
4259 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4260 		/* Search for preferred mode */
4261 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4262 			native_mode_found = true;
4263 			break;
4264 		}
4265 	}
4266 	if (!native_mode_found)
4267 		preferred_mode = list_first_entry_or_null(
4268 				&aconnector->base.modes,
4269 				struct drm_display_mode,
4270 				head);
4271 
4272 	mode_refresh = drm_mode_vrefresh(&mode);
4273 
4274 	if (preferred_mode == NULL) {
4275 		/*
4276 		 * This may not be an error, the use case is when we have no
4277 		 * usermode calls to reset and set mode upon hotplug. In this
4278 		 * case, we call set mode ourselves to restore the previous mode
4279 		 * and the modelist may not be filled in in time.
4280 		 */
4281 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4282 	} else {
4283 		decide_crtc_timing_for_drm_display_mode(
4284 				&mode, preferred_mode,
4285 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4286 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4287 	}
4288 
4289 	if (!dm_state)
4290 		drm_mode_set_crtcinfo(&mode, 0);
4291 
4292 	/*
4293 	* If scaling is enabled and refresh rate didn't change
4294 	* we copy the vic and polarities of the old timings
4295 	*/
4296 	if (!scale || mode_refresh != preferred_refresh)
4297 		fill_stream_properties_from_drm_display_mode(stream,
4298 			&mode, &aconnector->base, con_state, NULL);
4299 	else
4300 		fill_stream_properties_from_drm_display_mode(stream,
4301 			&mode, &aconnector->base, con_state, old_stream);
4302 
4303 	stream->timing.flags.DSC = 0;
4304 
4305 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4306 #if defined(CONFIG_DRM_AMD_DC_DCN)
4307 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4308 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4309 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4310 				      &dsc_caps);
4311 #endif
4312 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4313 							     dc_link_get_link_cap(aconnector->dc_link));
4314 
4315 #if defined(CONFIG_DRM_AMD_DC_DCN)
4316 		if (dsc_caps.is_dsc_supported)
4317 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4318 						  &dsc_caps,
4319 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4320 						  link_bandwidth_kbps,
4321 						  &stream->timing,
4322 						  &stream->timing.dsc_cfg))
4323 				stream->timing.flags.DSC = 1;
4324 #endif
4325 	}
4326 
4327 	update_stream_scaling_settings(&mode, dm_state, stream);
4328 
4329 	fill_audio_info(
4330 		&stream->audio_info,
4331 		drm_connector,
4332 		sink);
4333 
4334 	update_stream_signal(stream, sink);
4335 
4336 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4337 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;
4340 
4341 		if (dc_is_dmcu_initialized(core_dc)) {
4342 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4343 
4344 			stream->psr_version = dmcu->dmcu_version.psr_version;
4345 
4346 			//
4347 			// should decide stream support vsc sdp colorimetry capability
4348 			// before building vsc info packet
4349 			//
4350 			stream->use_vsc_sdp_for_colorimetry = false;
4351 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4352 				stream->use_vsc_sdp_for_colorimetry =
4353 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4354 			} else {
4355 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4356 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4357 					stream->use_vsc_sdp_for_colorimetry = true;
4358 				}
4359 			}
4360 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4361 		}
4362 	}
4363 finish:
4364 	dc_sink_release(sink);
4365 
4366 	return stream;
4367 }
4368 
4369 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4370 {
4371 	drm_crtc_cleanup(crtc);
4372 	kfree(crtc);
4373 }
4374 
4375 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4376 				  struct drm_crtc_state *state)
4377 {
4378 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4379 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4381 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
4389 }
4390 
4391 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4392 {
4393 	struct dm_crtc_state *state;
4394 
4395 	if (crtc->state)
4396 		dm_crtc_destroy_state(crtc, crtc->state);
4397 
4398 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4399 	if (WARN_ON(!state))
4400 		return;
4401 
4402 	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}
4406 
4407 static struct drm_crtc_state *
4408 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4409 {
4410 	struct dm_crtc_state *state, *cur;
4411 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
4416 
4417 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4418 	if (!state)
4419 		return NULL;
4420 
4421 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4422 
4423 	if (cur->stream) {
4424 		state->stream = cur->stream;
4425 		dc_stream_retain(state->stream);
4426 	}
4427 
4428 	state->active_planes = cur->active_planes;
4429 	state->interrupts_enabled = cur->interrupts_enabled;
4430 	state->vrr_params = cur->vrr_params;
4431 	state->vrr_infopacket = cur->vrr_infopacket;
4432 	state->abm_level = cur->abm_level;
4433 	state->vrr_supported = cur->vrr_supported;
4434 	state->freesync_config = cur->freesync_config;
4435 	state->crc_src = cur->crc_src;
4436 	state->cm_has_degamma = cur->cm_has_degamma;
4437 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4438 
	/* TODO: Duplicate dc_stream once the stream object is flattened */
4440 
4441 	return &state->base;
4442 }
4443 
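/*
 * Enable or disable the VUPDATE interrupt for a CRTC. Only takes effect on
 * pre-DCN (DCE) hardware; DCN families return early.
 */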
4444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4445 {
4446 	enum dc_irq_source irq_source;
4447 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4448 	struct amdgpu_device *adev = crtc->dev->dev_private;
4449 	int rc;
4450 
4451 	/* Do not set vupdate for DCN hardware */
4452 	if (adev->family > AMDGPU_FAMILY_AI)
4453 		return 0;
4454 
4455 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4456 
4457 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4458 
4459 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4460 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4461 	return rc;
4462 }
4463 
4464 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4465 {
4466 	enum dc_irq_source irq_source;
4467 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4468 	struct amdgpu_device *adev = crtc->dev->dev_private;
4469 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4470 	int rc = 0;
4471 
4472 	if (enable) {
4473 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4474 		if (amdgpu_dm_vrr_active(acrtc_state))
4475 			rc = dm_set_vupdate_irq(crtc, true);
4476 	} else {
4477 		/* vblank irq off -> vupdate irq off */
4478 		rc = dm_set_vupdate_irq(crtc, false);
4479 	}
4480 
4481 	if (rc)
4482 		return rc;
4483 
4484 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4485 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4486 }
4487 
4488 static int dm_enable_vblank(struct drm_crtc *crtc)
4489 {
4490 	return dm_set_vblank(crtc, true);
4491 }
4492 
4493 static void dm_disable_vblank(struct drm_crtc *crtc)
4494 {
4495 	dm_set_vblank(crtc, false);
4496 }
4497 
/* Only the options currently available for the driver are implemented */
4499 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4500 	.reset = dm_crtc_reset_state,
4501 	.destroy = amdgpu_dm_crtc_destroy,
4502 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4503 	.set_config = drm_atomic_helper_set_config,
4504 	.page_flip = drm_atomic_helper_page_flip,
4505 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4506 	.atomic_destroy_state = dm_crtc_destroy_state,
4507 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4508 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4509 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4510 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4511 	.enable_vblank = dm_enable_vblank,
4512 	.disable_vblank = dm_disable_vblank,
4513 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4514 };
4515 
4516 static enum drm_connector_status
4517 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4518 {
4519 	bool connected;
4520 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4521 
4522 	/*
4523 	 * Notes:
4524 	 * 1. This interface is NOT called in context of HPD irq.
4525 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4526 	 * makes it a bad place for *any* MST-related activity.
4527 	 */
4528 
4529 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4530 	    !aconnector->fake_enable)
4531 		connected = (aconnector->dc_sink != NULL);
4532 	else
4533 		connected = (aconnector->base.force == DRM_FORCE_ON);
4534 
4535 	return (connected ? connector_status_connected :
4536 			connector_status_disconnected);
4537 }
4538 
4539 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4540 					    struct drm_connector_state *connector_state,
4541 					    struct drm_property *property,
4542 					    uint64_t val)
4543 {
4544 	struct drm_device *dev = connector->dev;
4545 	struct amdgpu_device *adev = dev->dev_private;
4546 	struct dm_connector_state *dm_old_state =
4547 		to_dm_connector_state(connector->state);
4548 	struct dm_connector_state *dm_new_state =
4549 		to_dm_connector_state(connector_state);
4550 
4551 	int ret = -EINVAL;
4552 
4553 	if (property == dev->mode_config.scaling_mode_property) {
4554 		enum amdgpu_rmx_type rmx_type;
4555 
4556 		switch (val) {
4557 		case DRM_MODE_SCALE_CENTER:
4558 			rmx_type = RMX_CENTER;
4559 			break;
4560 		case DRM_MODE_SCALE_ASPECT:
4561 			rmx_type = RMX_ASPECT;
4562 			break;
4563 		case DRM_MODE_SCALE_FULLSCREEN:
4564 			rmx_type = RMX_FULL;
4565 			break;
4566 		case DRM_MODE_SCALE_NONE:
4567 		default:
4568 			rmx_type = RMX_OFF;
4569 			break;
4570 		}
4571 
4572 		if (dm_old_state->scaling == rmx_type)
4573 			return 0;
4574 
4575 		dm_new_state->scaling = rmx_type;
4576 		ret = 0;
4577 	} else if (property == adev->mode_info.underscan_hborder_property) {
4578 		dm_new_state->underscan_hborder = val;
4579 		ret = 0;
4580 	} else if (property == adev->mode_info.underscan_vborder_property) {
4581 		dm_new_state->underscan_vborder = val;
4582 		ret = 0;
4583 	} else if (property == adev->mode_info.underscan_property) {
4584 		dm_new_state->underscan_enable = val;
4585 		ret = 0;
4586 	} else if (property == adev->mode_info.abm_level_property) {
4587 		dm_new_state->abm_level = val;
4588 		ret = 0;
4589 	}
4590 
4591 	return ret;
4592 }
4593 
4594 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4595 					    const struct drm_connector_state *state,
4596 					    struct drm_property *property,
4597 					    uint64_t *val)
4598 {
4599 	struct drm_device *dev = connector->dev;
4600 	struct amdgpu_device *adev = dev->dev_private;
4601 	struct dm_connector_state *dm_state =
4602 		to_dm_connector_state(state);
4603 	int ret = -EINVAL;
4604 
4605 	if (property == dev->mode_config.scaling_mode_property) {
4606 		switch (dm_state->scaling) {
4607 		case RMX_CENTER:
4608 			*val = DRM_MODE_SCALE_CENTER;
4609 			break;
4610 		case RMX_ASPECT:
4611 			*val = DRM_MODE_SCALE_ASPECT;
4612 			break;
4613 		case RMX_FULL:
4614 			*val = DRM_MODE_SCALE_FULLSCREEN;
4615 			break;
4616 		case RMX_OFF:
4617 		default:
4618 			*val = DRM_MODE_SCALE_NONE;
4619 			break;
4620 		}
4621 		ret = 0;
4622 	} else if (property == adev->mode_info.underscan_hborder_property) {
4623 		*val = dm_state->underscan_hborder;
4624 		ret = 0;
4625 	} else if (property == adev->mode_info.underscan_vborder_property) {
4626 		*val = dm_state->underscan_vborder;
4627 		ret = 0;
4628 	} else if (property == adev->mode_info.underscan_property) {
4629 		*val = dm_state->underscan_enable;
4630 		ret = 0;
4631 	} else if (property == adev->mode_info.abm_level_property) {
4632 		*val = dm_state->abm_level;
4633 		ret = 0;
4634 	}
4635 
4636 	return ret;
4637 }
4638 
4639 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4640 {
4641 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4642 
4643 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4644 }
4645 
4646 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4647 {
4648 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4649 	const struct dc_link *link = aconnector->dc_link;
4650 	struct amdgpu_device *adev = connector->dev->dev_private;
4651 	struct amdgpu_display_manager *dm = &adev->dm;
4652 
4653 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4654 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4655 
4656 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4657 	    link->type != dc_connection_none &&
4658 	    dm->backlight_dev) {
4659 		backlight_device_unregister(dm->backlight_dev);
4660 		dm->backlight_dev = NULL;
4661 	}
4662 #endif
4663 
4664 	if (aconnector->dc_em_sink)
4665 		dc_sink_release(aconnector->dc_em_sink);
4666 	aconnector->dc_em_sink = NULL;
4667 	if (aconnector->dc_sink)
4668 		dc_sink_release(aconnector->dc_sink);
4669 	aconnector->dc_sink = NULL;
4670 
4671 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4672 	drm_connector_unregister(connector);
4673 	drm_connector_cleanup(connector);
4674 	if (aconnector->i2c) {
4675 		i2c_del_adapter(&aconnector->i2c->base);
4676 		kfree(aconnector->i2c);
4677 	}
4678 	kfree(aconnector->dm_dp_aux.aux.name);
4679 
4680 	kfree(connector);
4681 }
4682 
4683 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4684 {
4685 	struct dm_connector_state *state =
4686 		to_dm_connector_state(connector->state);
4687 
4688 	if (connector->state)
4689 		__drm_atomic_helper_connector_destroy_state(connector->state);
4690 
4691 	kfree(state);
4692 
4693 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4694 
4695 	if (state) {
4696 		state->scaling = RMX_OFF;
4697 		state->underscan_enable = false;
4698 		state->underscan_hborder = 0;
4699 		state->underscan_vborder = 0;
4700 		state->base.max_requested_bpc = 8;
4701 		state->vcpi_slots = 0;
4702 		state->pbn = 0;
4703 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4704 			state->abm_level = amdgpu_dm_abm_level;
4705 
4706 		__drm_atomic_helper_connector_reset(connector, &state->base);
4707 	}
4708 }
4709 
4710 struct drm_connector_state *
4711 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4712 {
4713 	struct dm_connector_state *state =
4714 		to_dm_connector_state(connector->state);
4715 
4716 	struct dm_connector_state *new_state =
4717 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4718 
4719 	if (!new_state)
4720 		return NULL;
4721 
4722 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4723 
4724 	new_state->freesync_capable = state->freesync_capable;
4725 	new_state->abm_level = state->abm_level;
4726 	new_state->scaling = state->scaling;
4727 	new_state->underscan_enable = state->underscan_enable;
4728 	new_state->underscan_hborder = state->underscan_hborder;
4729 	new_state->underscan_vborder = state->underscan_vborder;
4730 	new_state->vcpi_slots = state->vcpi_slots;
4731 	new_state->pbn = state->pbn;
4732 	return &new_state->base;
4733 }
4734 
4735 static int
4736 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4737 {
4738 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4739 		to_amdgpu_dm_connector(connector);
4740 	int r;
4741 
4742 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4743 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4744 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4745 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4746 		if (r)
4747 			return r;
4748 	}
4749 
4750 #if defined(CONFIG_DEBUG_FS)
4751 	connector_debugfs_init(amdgpu_dm_connector);
4752 #endif
4753 
4754 	return 0;
4755 }
4756 
4757 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4758 	.reset = amdgpu_dm_connector_funcs_reset,
4759 	.detect = amdgpu_dm_connector_detect,
4760 	.fill_modes = drm_helper_probe_single_connector_modes,
4761 	.destroy = amdgpu_dm_connector_destroy,
4762 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4763 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4764 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4765 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4766 	.late_register = amdgpu_dm_connector_late_register,
4767 	.early_unregister = amdgpu_dm_connector_unregister
4768 };
4769 
4770 static int get_modes(struct drm_connector *connector)
4771 {
4772 	return amdgpu_dm_connector_get_modes(connector);
4773 }
4774 
4775 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4776 {
4777 	struct dc_sink_init_data init_params = {
4778 			.link = aconnector->dc_link,
4779 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4780 	};
4781 	struct edid *edid;
4782 
4783 	if (!aconnector->base.edid_blob_ptr) {
4784 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4785 				aconnector->base.name);
4786 
4787 		aconnector->base.force = DRM_FORCE_OFF;
4788 		aconnector->base.override_edid = false;
4789 		return;
4790 	}
4791 
4792 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4793 
4794 	aconnector->edid = edid;
4795 
4796 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4797 		aconnector->dc_link,
4798 		(uint8_t *)edid,
4799 		(edid->extensions + 1) * EDID_LENGTH,
4800 		&init_params);
4801 
4802 	if (aconnector->base.force == DRM_FORCE_ON) {
4803 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4804 		aconnector->dc_link->local_sink :
4805 		aconnector->dc_em_sink;
4806 		dc_sink_retain(aconnector->dc_sink);
4807 	}
4808 }
4809 
4810 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4811 {
4812 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4813 
4814 	/*
4815 	 * In case of headless boot with force on for DP managed connector
4816 	 * Those settings have to be != 0 to get initial modeset
4817 	 */
4818 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4819 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4820 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4821 	}
4822 
4823 
4824 	aconnector->base.override_edid = true;
4825 	create_eml_sink(aconnector);
4826 }
4827 
4828 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4829 				   struct drm_display_mode *mode)
4830 {
4831 	int result = MODE_ERROR;
4832 	struct dc_sink *dc_sink;
4833 	struct amdgpu_device *adev = connector->dev->dev_private;
4834 	/* TODO: Unhardcode stream count */
4835 	struct dc_stream_state *stream;
4836 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4837 	enum dc_status dc_result = DC_OK;
4838 
4839 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4840 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4841 		return result;
4842 
4843 	/*
4844 	 * Only run this the first time mode_valid is called to initilialize
4845 	 * EDID mgmt
4846 	 */
4847 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4848 		!aconnector->dc_em_sink)
4849 		handle_edid_mgmt(aconnector);
4850 
4851 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4852 
4853 	if (dc_sink == NULL) {
4854 		DRM_ERROR("dc_sink is NULL!\n");
4855 		goto fail;
4856 	}
4857 
4858 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4859 	if (stream == NULL) {
4860 		DRM_ERROR("Failed to create stream for sink!\n");
4861 		goto fail;
4862 	}
4863 
4864 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4865 
4866 	if (dc_result == DC_OK)
4867 		result = MODE_OK;
4868 	else
4869 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4870 			      mode->hdisplay,
4871 			      mode->vdisplay,
4872 			      mode->clock,
4873 			      dc_result);
4874 
4875 	dc_stream_release(stream);
4876 
4877 fail:
	/* TODO: error handling */
4879 	return result;
4880 }
4881 
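/*
 * Pack the connector state's HDR output metadata into an HDMI Dynamic Range
 * and Mastering infoframe, then wrap it for DC as a raw infopacket or a DP
 * SDP depending on the connector type.
 */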
4882 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4883 				struct dc_info_packet *out)
4884 {
4885 	struct hdmi_drm_infoframe frame;
4886 	unsigned char buf[30]; /* 26 + 4 */
4887 	ssize_t len;
4888 	int ret, i;
4889 
4890 	memset(out, 0, sizeof(*out));
4891 
4892 	if (!state->hdr_output_metadata)
4893 		return 0;
4894 
4895 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4896 	if (ret)
4897 		return ret;
4898 
4899 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4900 	if (len < 0)
4901 		return (int)len;
4902 
4903 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4904 	if (len != 30)
4905 		return -EINVAL;
4906 
4907 	/* Prepare the infopacket for DC. */
4908 	switch (state->connector->connector_type) {
4909 	case DRM_MODE_CONNECTOR_HDMIA:
4910 		out->hb0 = 0x87; /* type */
4911 		out->hb1 = 0x01; /* version */
4912 		out->hb2 = 0x1A; /* length */
4913 		out->sb[0] = buf[3]; /* checksum */
4914 		i = 1;
4915 		break;
4916 
4917 	case DRM_MODE_CONNECTOR_DisplayPort:
4918 	case DRM_MODE_CONNECTOR_eDP:
4919 		out->hb0 = 0x00; /* sdp id, zero */
4920 		out->hb1 = 0x87; /* type */
4921 		out->hb2 = 0x1D; /* payload len - 1 */
4922 		out->hb3 = (0x13 << 2); /* sdp version */
4923 		out->sb[0] = 0x01; /* version */
4924 		out->sb[1] = 0x1A; /* length */
4925 		i = 2;
4926 		break;
4927 
4928 	default:
4929 		return -EINVAL;
4930 	}
4931 
4932 	memcpy(&out->sb[i], &buf[4], 26);
4933 	out->valid = true;
4934 
4935 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4936 		       sizeof(out->sb), false);
4937 
4938 	return 0;
4939 }
4940 
4941 static bool
4942 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4943 			  const struct drm_connector_state *new_state)
4944 {
4945 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4946 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4947 
4948 	if (old_blob != new_blob) {
4949 		if (old_blob && new_blob &&
4950 		    old_blob->length == new_blob->length)
4951 			return memcmp(old_blob->data, new_blob->data,
4952 				      old_blob->length);
4953 
4954 		return true;
4955 	}
4956 
4957 	return false;
4958 }
4959 
4960 static int
4961 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4962 				 struct drm_atomic_state *state)
4963 {
4964 	struct drm_connector_state *new_con_state =
4965 		drm_atomic_get_new_connector_state(state, conn);
4966 	struct drm_connector_state *old_con_state =
4967 		drm_atomic_get_old_connector_state(state, conn);
4968 	struct drm_crtc *crtc = new_con_state->crtc;
4969 	struct drm_crtc_state *new_crtc_state;
4970 	int ret;
4971 
4972 	if (!crtc)
4973 		return 0;
4974 
4975 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4976 		struct dc_info_packet hdr_infopacket;
4977 
4978 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4979 		if (ret)
4980 			return ret;
4981 
4982 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4983 		if (IS_ERR(new_crtc_state))
4984 			return PTR_ERR(new_crtc_state);
4985 
4986 		/*
4987 		 * DC considers the stream backends changed if the
4988 		 * static metadata changes. Forcing the modeset also
4989 		 * gives a simple way for userspace to switch from
4990 		 * 8bpc to 10bpc when setting the metadata to enter
4991 		 * or exit HDR.
4992 		 *
4993 		 * Changing the static metadata after it's been
4994 		 * set is permissible, however. So only force a
4995 		 * modeset if we're entering or exiting HDR.
4996 		 */
4997 		new_crtc_state->mode_changed =
4998 			!old_con_state->hdr_output_metadata ||
4999 			!new_con_state->hdr_output_metadata;
5000 	}
5001 
5002 	return 0;
5003 }
5004 
5005 static const struct drm_connector_helper_funcs
5006 amdgpu_dm_connector_helper_funcs = {
5007 	/*
5008 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5009 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5010 	 * are missing after user start lightdm. So we need to renew modes list.
5011 	 * in get_modes call back, not just return the modes count
5012 	 */
5013 	.get_modes = get_modes,
5014 	.mode_valid = amdgpu_dm_connector_mode_valid,
5015 	.atomic_check = amdgpu_dm_connector_atomic_check,
5016 };
5017 
5018 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5019 {
5020 }
5021 
5022 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5023 {
5024 	struct drm_device *dev = new_crtc_state->crtc->dev;
5025 	struct drm_plane *plane;
5026 
5027 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5028 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5029 			return true;
5030 	}
5031 
5032 	return false;
5033 }
5034 
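/*
 * Count the non-cursor planes on a CRTC that have a framebuffer attached.
 * Planes not part of the atomic state are assumed enabled, since they
 * previously passed validation.
 */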
5035 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5036 {
5037 	struct drm_atomic_state *state = new_crtc_state->state;
5038 	struct drm_plane *plane;
5039 	int num_active = 0;
5040 
5041 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5042 		struct drm_plane_state *new_plane_state;
5043 
5044 		/* Cursor planes are "fake". */
5045 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5046 			continue;
5047 
5048 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5049 
5050 		if (!new_plane_state) {
5051 			/*
5052 			 * The plane is enable on the CRTC and hasn't changed
5053 			 * state. This means that it previously passed
5054 			 * validation and is therefore enabled.
5055 			 */
5056 			num_active += 1;
5057 			continue;
5058 		}
5059 
5060 		/* We need a framebuffer to be considered enabled. */
5061 		num_active += (new_plane_state->fb != NULL);
5062 	}
5063 
5064 	return num_active;
5065 }
5066 
5067 /*
5068  * Sets whether interrupts should be enabled on a specific CRTC.
5069  * We require that the stream be enabled and that there exist active
5070  * DC planes on the stream.
5071  */
5072 static void
5073 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5074 			       struct drm_crtc_state *new_crtc_state)
5075 {
5076 	struct dm_crtc_state *dm_new_crtc_state =
5077 		to_dm_crtc_state(new_crtc_state);
5078 
5079 	dm_new_crtc_state->active_planes = 0;
5080 	dm_new_crtc_state->interrupts_enabled = false;
5081 
5082 	if (!dm_new_crtc_state->stream)
5083 		return;
5084 
5085 	dm_new_crtc_state->active_planes =
5086 		count_crtc_active_planes(new_crtc_state);
5087 
5088 	dm_new_crtc_state->interrupts_enabled =
5089 		dm_new_crtc_state->active_planes > 0;
5090 }
5091 
5092 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5093 				       struct drm_crtc_state *state)
5094 {
5095 	struct amdgpu_device *adev = crtc->dev->dev_private;
5096 	struct dc *dc = adev->dm.dc;
5097 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5098 	int ret = -EINVAL;
5099 
5100 	/*
5101 	 * Update interrupt state for the CRTC. This needs to happen whenever
5102 	 * the CRTC has changed or whenever any of its planes have changed.
5103 	 * Atomic check satisfies both of these requirements since the CRTC
5104 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5105 	 */
5106 	dm_update_crtc_interrupt_state(crtc, state);
5107 
5108 	if (unlikely(!dm_crtc_state->stream &&
5109 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5110 		WARN_ON(1);
5111 		return ret;
5112 	}
5113 
5114 	/* In some use cases, like reset, no stream is attached */
5115 	if (!dm_crtc_state->stream)
5116 		return 0;
5117 
5118 	/*
5119 	 * We want at least one hardware plane enabled to use
5120 	 * the stream with a cursor enabled.
5121 	 */
5122 	if (state->enable && state->active &&
5123 	    does_crtc_have_active_cursor(state) &&
5124 	    dm_crtc_state->active_planes == 0)
5125 		return -EINVAL;
5126 
5127 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5128 		return 0;
5129 
5130 	return ret;
5131 }
5132 
5133 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5134 				      const struct drm_display_mode *mode,
5135 				      struct drm_display_mode *adjusted_mode)
5136 {
5137 	return true;
5138 }
5139 
5140 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5141 	.disable = dm_crtc_helper_disable,
5142 	.atomic_check = dm_crtc_helper_atomic_check,
5143 	.mode_fixup = dm_crtc_helper_mode_fixup,
5144 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5145 };
5146 
5147 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5148 {
5149 
5150 }
5151 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
5172 
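/*
 * For DP MST connectors, compute the PBN and the number of VCPI slots the
 * adjusted mode requires and store them in the connector state for use at
 * commit time. Non-MST connectors are a no-op.
 */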
5173 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5174 					  struct drm_crtc_state *crtc_state,
5175 					  struct drm_connector_state *conn_state)
5176 {
5177 	struct drm_atomic_state *state = crtc_state->state;
5178 	struct drm_connector *connector = conn_state->connector;
5179 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5180 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5181 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5182 	struct drm_dp_mst_topology_mgr *mst_mgr;
5183 	struct drm_dp_mst_port *mst_port;
5184 	enum dc_color_depth color_depth;
5185 	int clock, bpp = 0;
5186 	bool is_y420 = false;
5187 
5188 	if (!aconnector->port || !aconnector->dc_sink)
5189 		return 0;
5190 
5191 	mst_port = aconnector->port;
5192 	mst_mgr = &aconnector->mst_port->mst_mgr;
5193 
5194 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5195 		return 0;
5196 
5197 	if (!state->duplicated) {
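		/*
		 * Recompute the payload bandwidth number (PBN) from the mode
		 * clock and the effective bits per pixel (3 components at the
		 * negotiated color depth); duplicated states keep the PBN
		 * they were copied with.
		 */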
5198 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5199 				aconnector->force_yuv420_output;
5200 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5201 								    is_y420);
5202 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5203 		clock = adjusted_mode->clock;
5204 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5205 	}
5206 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5207 									   mst_mgr,
5208 									   mst_port,
5209 									   dm_new_connector_state->pbn,
5210 									   0);
5211 	if (dm_new_connector_state->vcpi_slots < 0) {
5212 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5213 		return dm_new_connector_state->vcpi_slots;
5214 	}
5215 	return 0;
5216 }
5217 
5218 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5219 	.disable = dm_encoder_helper_disable,
5220 	.atomic_check = dm_encoder_helper_atomic_check
5221 };
5222 
5223 #if defined(CONFIG_DRM_AMD_DC_DCN)
5224 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5225 					    struct dc_state *dc_state)
5226 {
5227 	struct dc_stream_state *stream = NULL;
5228 	struct drm_connector *connector;
5229 	struct drm_connector_state *new_con_state, *old_con_state;
5230 	struct amdgpu_dm_connector *aconnector;
5231 	struct dm_connector_state *dm_conn_state;
5232 	int i, j, clock, bpp;
5233 	int vcpi, pbn_div, pbn = 0;
5234 
5235 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5236 
5237 		aconnector = to_amdgpu_dm_connector(connector);
5238 
5239 		if (!aconnector->port)
5240 			continue;
5241 
5242 		if (!new_con_state || !new_con_state->crtc)
5243 			continue;
5244 
5245 		dm_conn_state = to_dm_connector_state(new_con_state);
5246 
5247 		for (j = 0; j < dc_state->stream_count; j++) {
5248 			stream = dc_state->streams[j];
5249 			if (!stream)
5250 				continue;
5251 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5253 				break;
5254 
5255 			stream = NULL;
5256 		}
5257 
5258 		if (!stream)
5259 			continue;
5260 
5261 		if (stream->timing.flags.DSC != 1) {
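			/*
			 * DSC is off for this stream: keep the previously
			 * computed PBN and make sure DSC stays disabled on
			 * the MST port.
			 */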
5262 			drm_dp_mst_atomic_enable_dsc(state,
5263 						     aconnector->port,
5264 						     dm_conn_state->pbn,
5265 						     0,
5266 						     false);
5267 			continue;
5268 		}
5269 
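		/*
		 * DSC is enabled: recompute the PBN from the compressed
		 * bits-per-pixel and atomically reserve VCPI slots for it.
		 */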
5270 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5271 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5272 		clock = stream->timing.pix_clk_100hz / 10;
5273 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5274 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5275 						    aconnector->port,
5276 						    pbn, pbn_div,
5277 						    true);
5278 		if (vcpi < 0)
5279 			return vcpi;
5280 
5281 		dm_conn_state->pbn = pbn;
5282 		dm_conn_state->vcpi_slots = vcpi;
5283 	}
5284 	return 0;
5285 }
5286 #endif
5287 
5288 static void dm_drm_plane_reset(struct drm_plane *plane)
5289 {
5290 	struct dm_plane_state *amdgpu_state = NULL;
5291 
5292 	if (plane->state)
5293 		plane->funcs->atomic_destroy_state(plane, plane->state);
5294 
5295 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5296 	WARN_ON(amdgpu_state == NULL);
5297 
5298 	if (amdgpu_state)
5299 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5300 }
5301 
5302 static struct drm_plane_state *
5303 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5304 {
5305 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5306 
5307 	old_dm_plane_state = to_dm_plane_state(plane->state);
5308 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5309 	if (!dm_plane_state)
5310 		return NULL;
5311 
5312 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5313 
5314 	if (old_dm_plane_state->dc_state) {
5315 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5316 		dc_plane_state_retain(dm_plane_state->dc_state);
5317 	}
5318 
5319 	return &dm_plane_state->base;
5320 }
5321 
5322 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5323 				struct drm_plane_state *state)
5324 {
5325 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5326 
5327 	if (dm_plane_state->dc_state)
5328 		dc_plane_state_release(dm_plane_state->dc_state);
5329 
5330 	drm_atomic_helper_plane_destroy_state(plane, state);
5331 }
5332 
5333 static const struct drm_plane_funcs dm_plane_funcs = {
5334 	.update_plane	= drm_atomic_helper_update_plane,
5335 	.disable_plane	= drm_atomic_helper_disable_plane,
5336 	.destroy	= drm_primary_helper_destroy,
5337 	.reset = dm_drm_plane_reset,
5338 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5339 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5340 };
5341 
5342 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5343 				      struct drm_plane_state *new_state)
5344 {
5345 	struct amdgpu_framebuffer *afb;
5346 	struct drm_gem_object *obj;
5347 	struct amdgpu_device *adev;
5348 	struct amdgpu_bo *rbo;
5349 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5350 	struct list_head list;
5351 	struct ttm_validate_buffer tv;
5352 	struct ww_acquire_ctx ticket;
5353 	uint64_t tiling_flags;
5354 	uint32_t domain;
5355 	int r;
5356 	bool force_disable_dcc = false;
5357 
5358 	dm_plane_state_old = to_dm_plane_state(plane->state);
5359 	dm_plane_state_new = to_dm_plane_state(new_state);
5360 
5361 	if (!new_state->fb) {
5362 		DRM_DEBUG_DRIVER("No FB bound\n");
5363 		return 0;
5364 	}
5365 
5366 	afb = to_amdgpu_framebuffer(new_state->fb);
5367 	obj = new_state->fb->obj[0];
5368 	rbo = gem_to_amdgpu_bo(obj);
5369 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5370 	INIT_LIST_HEAD(&list);
5371 
5372 	tv.bo = &rbo->tbo;
5373 	tv.num_shared = 1;
5374 	list_add(&tv.head, &list);
5375 
5376 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5377 	if (r) {
5378 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5379 		return r;
5380 	}
5381 
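	/*
	 * Cursor surfaces are always pinned to VRAM; other plane types may
	 * use any domain the display hardware supports for scanout.
	 */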
5382 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5383 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5384 	else
5385 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5386 
5387 	r = amdgpu_bo_pin(rbo, domain);
5388 	if (unlikely(r != 0)) {
5389 		if (r != -ERESTARTSYS)
5390 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5391 		ttm_eu_backoff_reservation(&ticket, &list);
5392 		return r;
5393 	}
5394 
5395 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5396 	if (unlikely(r != 0)) {
5397 		amdgpu_bo_unpin(rbo);
5398 		ttm_eu_backoff_reservation(&ticket, &list);
5399 		DRM_ERROR("%p bind failed\n", rbo);
5400 		return r;
5401 	}
5402 
5403 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5404 
5405 	ttm_eu_backoff_reservation(&ticket, &list);
5406 
5407 	afb->address = amdgpu_bo_gpu_offset(rbo);
5408 
5409 	amdgpu_bo_ref(rbo);
5410 
5411 	if (dm_plane_state_new->dc_state &&
5412 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5413 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5414 
5415 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5416 		fill_plane_buffer_attributes(
5417 			adev, afb, plane_state->format, plane_state->rotation,
5418 			tiling_flags, &plane_state->tiling_info,
5419 			&plane_state->plane_size, &plane_state->dcc,
5420 			&plane_state->address,
5421 			force_disable_dcc);
5422 	}
5423 
5424 	return 0;
5425 }
5426 
5427 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5428 				       struct drm_plane_state *old_state)
5429 {
5430 	struct amdgpu_bo *rbo;
5431 	int r;
5432 
5433 	if (!old_state->fb)
5434 		return;
5435 
5436 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5437 	r = amdgpu_bo_reserve(rbo, false);
5438 	if (unlikely(r)) {
5439 		DRM_ERROR("failed to reserve rbo before unpin\n");
5440 		return;
5441 	}
5442 
5443 	amdgpu_bo_unpin(rbo);
5444 	amdgpu_bo_unreserve(rbo);
5445 	amdgpu_bo_unref(&rbo);
5446 }
5447 
5448 static int dm_plane_atomic_check(struct drm_plane *plane,
5449 				 struct drm_plane_state *state)
5450 {
5451 	struct amdgpu_device *adev = plane->dev->dev_private;
5452 	struct dc *dc = adev->dm.dc;
5453 	struct dm_plane_state *dm_plane_state;
5454 	struct dc_scaling_info scaling_info;
5455 	int ret;
5456 
5457 	dm_plane_state = to_dm_plane_state(state);
5458 
5459 	if (!dm_plane_state->dc_state)
5460 		return 0;
5461 
5462 	ret = fill_dc_scaling_info(state, &scaling_info);
5463 	if (ret)
5464 		return ret;
5465 
5466 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5467 		return 0;
5468 
5469 	return -EINVAL;
5470 }
5471 
5472 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5473 				       struct drm_plane_state *new_plane_state)
5474 {
5475 	/* Only support async updates on cursor planes. */
5476 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5477 		return -EINVAL;
5478 
5479 	return 0;
5480 }
5481 
5482 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5483 					 struct drm_plane_state *new_state)
5484 {
5485 	struct drm_plane_state *old_state =
5486 		drm_atomic_get_old_plane_state(new_state->state, plane);
5487 
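	/*
	 * Async updates bypass the normal commit tail: copy the new
	 * coordinates and swap the FB directly into the committed state.
	 */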
5488 	swap(plane->state->fb, new_state->fb);
5489 
5490 	plane->state->src_x = new_state->src_x;
5491 	plane->state->src_y = new_state->src_y;
5492 	plane->state->src_w = new_state->src_w;
5493 	plane->state->src_h = new_state->src_h;
5494 	plane->state->crtc_x = new_state->crtc_x;
5495 	plane->state->crtc_y = new_state->crtc_y;
5496 	plane->state->crtc_w = new_state->crtc_w;
5497 	plane->state->crtc_h = new_state->crtc_h;
5498 
5499 	handle_cursor_update(plane, old_state);
5500 }
5501 
5502 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5503 	.prepare_fb = dm_plane_helper_prepare_fb,
5504 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5505 	.atomic_check = dm_plane_atomic_check,
5506 	.atomic_async_check = dm_plane_atomic_async_check,
5507 	.atomic_async_update = dm_plane_atomic_async_update
5508 };
5509 
/*
 * TODO: These are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper checking.
 */
5516 static const uint32_t rgb_formats[] = {
5517 	DRM_FORMAT_XRGB8888,
5518 	DRM_FORMAT_ARGB8888,
5519 	DRM_FORMAT_RGBA8888,
5520 	DRM_FORMAT_XRGB2101010,
5521 	DRM_FORMAT_XBGR2101010,
5522 	DRM_FORMAT_ARGB2101010,
5523 	DRM_FORMAT_ABGR2101010,
5524 	DRM_FORMAT_XBGR8888,
5525 	DRM_FORMAT_ABGR8888,
5526 	DRM_FORMAT_RGB565,
5527 };
5528 
5529 static const uint32_t overlay_formats[] = {
5530 	DRM_FORMAT_XRGB8888,
5531 	DRM_FORMAT_ARGB8888,
5532 	DRM_FORMAT_RGBA8888,
5533 	DRM_FORMAT_XBGR8888,
5534 	DRM_FORMAT_ABGR8888,
5535 	DRM_FORMAT_RGB565
5536 };
5537 
5538 static const u32 cursor_formats[] = {
5539 	DRM_FORMAT_ARGB8888
5540 };
5541 
5542 static int get_plane_formats(const struct drm_plane *plane,
5543 			     const struct dc_plane_cap *plane_cap,
5544 			     uint32_t *formats, int max_formats)
5545 {
5546 	int i, num_formats = 0;
5547 
5548 	/*
5549 	 * TODO: Query support for each group of formats directly from
5550 	 * DC plane caps. This will require adding more formats to the
5551 	 * caps list.
5552 	 */
5553 
5554 	switch (plane->type) {
5555 	case DRM_PLANE_TYPE_PRIMARY:
5556 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5557 			if (num_formats >= max_formats)
5558 				break;
5559 
5560 			formats[num_formats++] = rgb_formats[i];
5561 		}
5562 
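		/*
		 * NV12/P010 are appended without a max_formats check; callers
		 * pass a formats array large enough to hold the extra entries.
		 */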
5563 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5564 			formats[num_formats++] = DRM_FORMAT_NV12;
5565 		if (plane_cap && plane_cap->pixel_format_support.p010)
5566 			formats[num_formats++] = DRM_FORMAT_P010;
5567 		break;
5568 
5569 	case DRM_PLANE_TYPE_OVERLAY:
5570 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5571 			if (num_formats >= max_formats)
5572 				break;
5573 
5574 			formats[num_formats++] = overlay_formats[i];
5575 		}
5576 		break;
5577 
5578 	case DRM_PLANE_TYPE_CURSOR:
5579 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5580 			if (num_formats >= max_formats)
5581 				break;
5582 
5583 			formats[num_formats++] = cursor_formats[i];
5584 		}
5585 		break;
5586 	}
5587 
5588 	return num_formats;
5589 }
5590 
5591 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5592 				struct drm_plane *plane,
5593 				unsigned long possible_crtcs,
5594 				const struct dc_plane_cap *plane_cap)
5595 {
5596 	uint32_t formats[32];
5597 	int num_formats;
5598 	int res = -EPERM;
5599 
5600 	num_formats = get_plane_formats(plane, plane_cap, formats,
5601 					ARRAY_SIZE(formats));
5602 
5603 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5604 				       &dm_plane_funcs, formats, num_formats,
5605 				       NULL, plane->type, NULL);
5606 	if (res)
5607 		return res;
5608 
5609 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5610 	    plane_cap && plane_cap->per_pixel_alpha) {
5611 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5612 					  BIT(DRM_MODE_BLEND_PREMULTI);
5613 
5614 		drm_plane_create_alpha_property(plane);
5615 		drm_plane_create_blend_mode_property(plane, blend_caps);
5616 	}
5617 
5618 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5619 	    plane_cap &&
5620 	    (plane_cap->pixel_format_support.nv12 ||
5621 	     plane_cap->pixel_format_support.p010)) {
5622 		/* This only affects YUV formats. */
5623 		drm_plane_create_color_properties(
5624 			plane,
5625 			BIT(DRM_COLOR_YCBCR_BT601) |
5626 			BIT(DRM_COLOR_YCBCR_BT709) |
5627 			BIT(DRM_COLOR_YCBCR_BT2020),
5628 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5629 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5630 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5631 	}
5632 
5633 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5634 
5635 	/* Create (reset) the plane state */
5636 	if (plane->funcs->reset)
5637 		plane->funcs->reset(plane);
5638 
5639 	return 0;
5640 }
5641 
5642 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5643 			       struct drm_plane *plane,
5644 			       uint32_t crtc_index)
5645 {
5646 	struct amdgpu_crtc *acrtc = NULL;
5647 	struct drm_plane *cursor_plane;
5648 
5649 	int res = -ENOMEM;
5650 
5651 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5652 	if (!cursor_plane)
5653 		goto fail;
5654 
5655 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5656 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
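	/*
	 * The cursor plane is registered with an empty possible_crtcs mask
	 * and handed to drm_crtc_init_with_planes() below as this CRTC's
	 * dedicated cursor plane.
	 */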
5657 
5658 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5659 	if (!acrtc)
5660 		goto fail;
5661 
5662 	res = drm_crtc_init_with_planes(
5663 			dm->ddev,
5664 			&acrtc->base,
5665 			plane,
5666 			cursor_plane,
5667 			&amdgpu_dm_crtc_funcs, NULL);
5668 
5669 	if (res)
5670 		goto fail;
5671 
5672 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5673 
	/* Create (reset) the CRTC state */
5675 	if (acrtc->base.funcs->reset)
5676 		acrtc->base.funcs->reset(&acrtc->base);
5677 
5678 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5679 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5680 
5681 	acrtc->crtc_id = crtc_index;
5682 	acrtc->base.enabled = false;
5683 	acrtc->otg_inst = -1;
5684 
5685 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5686 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5687 				   true, MAX_COLOR_LUT_ENTRIES);
5688 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5689 
5690 	return 0;
5691 
5692 fail:
5693 	kfree(acrtc);
5694 	kfree(cursor_plane);
5695 	return res;
5696 }
5697 
5698 
5699 static int to_drm_connector_type(enum signal_type st)
5700 {
5701 	switch (st) {
5702 	case SIGNAL_TYPE_HDMI_TYPE_A:
5703 		return DRM_MODE_CONNECTOR_HDMIA;
5704 	case SIGNAL_TYPE_EDP:
5705 		return DRM_MODE_CONNECTOR_eDP;
5706 	case SIGNAL_TYPE_LVDS:
5707 		return DRM_MODE_CONNECTOR_LVDS;
5708 	case SIGNAL_TYPE_RGB:
5709 		return DRM_MODE_CONNECTOR_VGA;
5710 	case SIGNAL_TYPE_DISPLAY_PORT:
5711 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5712 		return DRM_MODE_CONNECTOR_DisplayPort;
5713 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5714 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5715 		return DRM_MODE_CONNECTOR_DVID;
5716 	case SIGNAL_TYPE_VIRTUAL:
5717 		return DRM_MODE_CONNECTOR_VIRTUAL;
5718 
5719 	default:
5720 		return DRM_MODE_CONNECTOR_Unknown;
5721 	}
5722 }
5723 
5724 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5725 {
5726 	struct drm_encoder *encoder;
5727 
5728 	/* There is only one encoder per connector */
5729 	drm_connector_for_each_possible_encoder(connector, encoder)
5730 		return encoder;
5731 
5732 	return NULL;
5733 }
5734 
5735 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5736 {
5737 	struct drm_encoder *encoder;
5738 	struct amdgpu_encoder *amdgpu_encoder;
5739 
5740 	encoder = amdgpu_dm_connector_to_encoder(connector);
5741 
5742 	if (encoder == NULL)
5743 		return;
5744 
5745 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5746 
5747 	amdgpu_encoder->native_mode.clock = 0;
5748 
5749 	if (!list_empty(&connector->probed_modes)) {
5750 		struct drm_display_mode *preferred_mode = NULL;
5751 
5752 		list_for_each_entry(preferred_mode,
5753 				    &connector->probed_modes,
5754 				    head) {
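			/*
			 * Note: the unconditional break below means only the
			 * first probed mode is considered; the caller sorts
			 * the list so this is the highest-resolution mode.
			 */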
5755 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5756 				amdgpu_encoder->native_mode = *preferred_mode;
5757 
5758 			break;
5759 		}
5760 
5761 	}
5762 }
5763 
5764 static struct drm_display_mode *
5765 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5766 			     char *name,
5767 			     int hdisplay, int vdisplay)
5768 {
5769 	struct drm_device *dev = encoder->dev;
5770 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5771 	struct drm_display_mode *mode = NULL;
5772 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5773 
5774 	mode = drm_mode_duplicate(dev, native_mode);
5775 
5776 	if (mode == NULL)
5777 		return NULL;
5778 
5779 	mode->hdisplay = hdisplay;
5780 	mode->vdisplay = vdisplay;
5781 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5782 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5783 
5784 	return mode;
}
5787 
5788 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5789 						 struct drm_connector *connector)
5790 {
5791 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5792 	struct drm_display_mode *mode = NULL;
5793 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5794 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5795 				to_amdgpu_dm_connector(connector);
5796 	int i;
5797 	int n;
5798 	struct mode_size {
5799 		char name[DRM_DISPLAY_MODE_LEN];
5800 		int w;
5801 		int h;
5802 	} common_modes[] = {
5803 		{  "640x480",  640,  480},
5804 		{  "800x600",  800,  600},
5805 		{ "1024x768", 1024,  768},
5806 		{ "1280x720", 1280,  720},
5807 		{ "1280x800", 1280,  800},
5808 		{"1280x1024", 1280, 1024},
5809 		{ "1440x900", 1440,  900},
5810 		{"1680x1050", 1680, 1050},
5811 		{"1600x1200", 1600, 1200},
5812 		{"1920x1080", 1920, 1080},
5813 		{"1920x1200", 1920, 1200}
5814 	};
5815 
5816 	n = ARRAY_SIZE(common_modes);
5817 
5818 	for (i = 0; i < n; i++) {
5819 		struct drm_display_mode *curmode = NULL;
5820 		bool mode_existed = false;
5821 
5822 		if (common_modes[i].w > native_mode->hdisplay ||
5823 		    common_modes[i].h > native_mode->vdisplay ||
5824 		   (common_modes[i].w == native_mode->hdisplay &&
5825 		    common_modes[i].h == native_mode->vdisplay))
5826 			continue;
5827 
5828 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5829 			if (common_modes[i].w == curmode->hdisplay &&
5830 			    common_modes[i].h == curmode->vdisplay) {
5831 				mode_existed = true;
5832 				break;
5833 			}
5834 		}
5835 
5836 		if (mode_existed)
5837 			continue;
5838 
5839 		mode = amdgpu_dm_create_common_mode(encoder,
5840 				common_modes[i].name, common_modes[i].w,
5841 				common_modes[i].h);
5842 		drm_mode_probed_add(connector, mode);
5843 		amdgpu_dm_connector->num_modes++;
5844 	}
5845 }
5846 
5847 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5848 					      struct edid *edid)
5849 {
5850 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5851 			to_amdgpu_dm_connector(connector);
5852 
5853 	if (edid) {
5854 		/* empty probed_modes */
5855 		INIT_LIST_HEAD(&connector->probed_modes);
5856 		amdgpu_dm_connector->num_modes =
5857 				drm_add_edid_modes(connector, edid);
5858 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain more
		 * than one preferred mode. A mode that appears later in the
		 * probed mode list could be both preferred and of a higher
		 * resolution, e.g. 3840x2160 in the base EDID preferred
		 * timing and 4096x2160 in a DID extension block.
		 */
5867 		drm_mode_sort(&connector->probed_modes);
5868 		amdgpu_dm_get_native_mode(connector);
5869 	} else {
5870 		amdgpu_dm_connector->num_modes = 0;
5871 	}
5872 }
5873 
5874 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5875 {
5876 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5877 			to_amdgpu_dm_connector(connector);
5878 	struct drm_encoder *encoder;
5879 	struct edid *edid = amdgpu_dm_connector->edid;
5880 
5881 	encoder = amdgpu_dm_connector_to_encoder(connector);
5882 
5883 	if (!edid || !drm_edid_is_valid(edid)) {
5884 		amdgpu_dm_connector->num_modes =
5885 				drm_add_modes_noedid(connector, 640, 480);
5886 	} else {
5887 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5888 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5889 	}
5890 	amdgpu_dm_fbc_init(connector);
5891 
5892 	return amdgpu_dm_connector->num_modes;
5893 }
5894 
5895 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5896 				     struct amdgpu_dm_connector *aconnector,
5897 				     int connector_type,
5898 				     struct dc_link *link,
5899 				     int link_index)
5900 {
5901 	struct amdgpu_device *adev = dm->ddev->dev_private;
5902 
5903 	/*
5904 	 * Some of the properties below require access to state, like bpc.
5905 	 * Allocate some default initial connector state with our reset helper.
5906 	 */
5907 	if (aconnector->base.funcs->reset)
5908 		aconnector->base.funcs->reset(&aconnector->base);
5909 
5910 	aconnector->connector_id = link_index;
5911 	aconnector->dc_link = link;
5912 	aconnector->base.interlace_allowed = false;
5913 	aconnector->base.doublescan_allowed = false;
5914 	aconnector->base.stereo_allowed = false;
5915 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5916 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5917 	aconnector->audio_inst = -1;
5918 	mutex_init(&aconnector->hpd_lock);
5919 
	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
5924 	switch (connector_type) {
5925 	case DRM_MODE_CONNECTOR_HDMIA:
5926 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5927 		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
5929 		break;
5930 	case DRM_MODE_CONNECTOR_DisplayPort:
5931 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5932 		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
5934 		break;
5935 	case DRM_MODE_CONNECTOR_DVID:
5936 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5937 		break;
5938 	default:
5939 		break;
5940 	}
5941 
5942 	drm_object_attach_property(&aconnector->base.base,
5943 				dm->ddev->mode_config.scaling_mode_property,
5944 				DRM_MODE_SCALE_NONE);
5945 
5946 	drm_object_attach_property(&aconnector->base.base,
5947 				adev->mode_info.underscan_property,
5948 				UNDERSCAN_OFF);
5949 	drm_object_attach_property(&aconnector->base.base,
5950 				adev->mode_info.underscan_hborder_property,
5951 				0);
5952 	drm_object_attach_property(&aconnector->base.base,
5953 				adev->mode_info.underscan_vborder_property,
5954 				0);
5955 
5956 	if (!aconnector->mst_port)
5957 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5958 
5959 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
5960 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5961 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5962 
5963 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5964 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5965 		drm_object_attach_property(&aconnector->base.base,
5966 				adev->mode_info.abm_level_property, 0);
5967 	}
5968 
5969 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5970 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5971 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5972 		drm_object_attach_property(
5973 			&aconnector->base.base,
5974 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5975 
5976 		if (!aconnector->mst_port)
5977 			drm_connector_attach_vrr_capable_property(&aconnector->base);
5978 
5979 #ifdef CONFIG_DRM_AMD_DC_HDCP
5980 		if (adev->dm.hdcp_workqueue)
5981 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5982 #endif
5983 	}
5984 }
5985 
5986 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5987 			      struct i2c_msg *msgs, int num)
5988 {
5989 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5990 	struct ddc_service *ddc_service = i2c->ddc_service;
5991 	struct i2c_command cmd;
5992 	int i;
5993 	int result = -EIO;
5994 
5995 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5996 
5997 	if (!cmd.payloads)
5998 		return result;
5999 
6000 	cmd.number_of_payloads = num;
6001 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6002 	cmd.speed = 100;
6003 
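	/*
	 * Translate each struct i2c_msg into a DC i2c_payload; the transfer
	 * direction comes from the I2C_M_RD flag.
	 */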
6004 	for (i = 0; i < num; i++) {
6005 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6006 		cmd.payloads[i].address = msgs[i].addr;
6007 		cmd.payloads[i].length = msgs[i].len;
6008 		cmd.payloads[i].data = msgs[i].buf;
6009 	}
6010 
6011 	if (dc_submit_i2c(
6012 			ddc_service->ctx->dc,
6013 			ddc_service->ddc_pin->hw_info.ddc_channel,
6014 			&cmd))
6015 		result = num;
6016 
6017 	kfree(cmd.payloads);
6018 	return result;
6019 }
6020 
6021 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6022 {
6023 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6024 }
6025 
6026 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6027 	.master_xfer = amdgpu_dm_i2c_xfer,
6028 	.functionality = amdgpu_dm_i2c_func,
6029 };
6030 
6031 static struct amdgpu_i2c_adapter *
6032 create_i2c(struct ddc_service *ddc_service,
6033 	   int link_index,
6034 	   int *res)
6035 {
6036 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6037 	struct amdgpu_i2c_adapter *i2c;
6038 
6039 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6040 	if (!i2c)
6041 		return NULL;
6042 	i2c->base.owner = THIS_MODULE;
6043 	i2c->base.class = I2C_CLASS_DDC;
6044 	i2c->base.dev.parent = &adev->pdev->dev;
6045 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6046 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6047 	i2c_set_adapdata(&i2c->base, i2c);
6048 	i2c->ddc_service = ddc_service;
6049 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6050 
6051 	return i2c;
6052 }
6053 
6054 
6055 /*
6056  * Note: this function assumes that dc_link_detect() was called for the
6057  * dc_link which will be represented by this aconnector.
6058  */
6059 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6060 				    struct amdgpu_dm_connector *aconnector,
6061 				    uint32_t link_index,
6062 				    struct amdgpu_encoder *aencoder)
6063 {
6064 	int res = 0;
6065 	int connector_type;
6066 	struct dc *dc = dm->dc;
6067 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6068 	struct amdgpu_i2c_adapter *i2c;
6069 
6070 	link->priv = aconnector;
6071 
6072 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6073 
6074 	i2c = create_i2c(link->ddc, link->link_index, &res);
6075 	if (!i2c) {
6076 		DRM_ERROR("Failed to create i2c adapter data\n");
6077 		return -ENOMEM;
6078 	}
6079 
6080 	aconnector->i2c = i2c;
6081 	res = i2c_add_adapter(&i2c->base);
6082 
6083 	if (res) {
6084 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6085 		goto out_free;
6086 	}
6087 
6088 	connector_type = to_drm_connector_type(link->connector_signal);
6089 
6090 	res = drm_connector_init_with_ddc(
6091 			dm->ddev,
6092 			&aconnector->base,
6093 			&amdgpu_dm_connector_funcs,
6094 			connector_type,
6095 			&i2c->base);
6096 
6097 	if (res) {
6098 		DRM_ERROR("connector_init failed\n");
6099 		aconnector->connector_id = -1;
6100 		goto out_free;
6101 	}
6102 
6103 	drm_connector_helper_add(
6104 			&aconnector->base,
6105 			&amdgpu_dm_connector_helper_funcs);
6106 
6107 	amdgpu_dm_connector_init_helper(
6108 		dm,
6109 		aconnector,
6110 		connector_type,
6111 		link,
6112 		link_index);
6113 
6114 	drm_connector_attach_encoder(
6115 		&aconnector->base, &aencoder->base);
6116 
6117 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6118 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6119 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6120 
6121 out_free:
6122 	if (res) {
6123 		kfree(i2c);
6124 		aconnector->i2c = NULL;
6125 	}
6126 	return res;
6127 }
6128 
6129 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6130 {
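	/*
	 * Bit i of the returned mask marks CRTC i as a possible source for
	 * the encoder, capped at the 6 CRTCs the hardware supports.
	 */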
6131 	switch (adev->mode_info.num_crtc) {
6132 	case 1:
6133 		return 0x1;
6134 	case 2:
6135 		return 0x3;
6136 	case 3:
6137 		return 0x7;
6138 	case 4:
6139 		return 0xf;
6140 	case 5:
6141 		return 0x1f;
6142 	case 6:
6143 	default:
6144 		return 0x3f;
6145 	}
6146 }
6147 
6148 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6149 				  struct amdgpu_encoder *aencoder,
6150 				  uint32_t link_index)
6151 {
6152 	struct amdgpu_device *adev = dev->dev_private;
6153 
6154 	int res = drm_encoder_init(dev,
6155 				   &aencoder->base,
6156 				   &amdgpu_dm_encoder_funcs,
6157 				   DRM_MODE_ENCODER_TMDS,
6158 				   NULL);
6159 
6160 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6161 
6162 	if (!res)
6163 		aencoder->encoder_id = link_index;
6164 	else
6165 		aencoder->encoder_id = -1;
6166 
6167 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6168 
6169 	return res;
6170 }
6171 
6172 static void manage_dm_interrupts(struct amdgpu_device *adev,
6173 				 struct amdgpu_crtc *acrtc,
6174 				 bool enable)
6175 {
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK irq constant matches the PFLIP one.
	 */
6180 	int irq_type =
6181 		amdgpu_display_crtc_idx_to_irq_type(
6182 			adev,
6183 			acrtc->crtc_id);
6184 
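	/*
	 * Keep the ordering symmetric: vblank is enabled before taking the
	 * pageflip irq reference and disabled only after it is released.
	 */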
6185 	if (enable) {
6186 		drm_crtc_vblank_on(&acrtc->base);
6187 		amdgpu_irq_get(
6188 			adev,
6189 			&adev->pageflip_irq,
6190 			irq_type);
6191 	} else {
6192 
6193 		amdgpu_irq_put(
6194 			adev,
6195 			&adev->pageflip_irq,
6196 			irq_type);
6197 		drm_crtc_vblank_off(&acrtc->base);
6198 	}
6199 }
6200 
6201 static bool
6202 is_scaling_state_different(const struct dm_connector_state *dm_state,
6203 			   const struct dm_connector_state *old_dm_state)
6204 {
6205 	if (dm_state->scaling != old_dm_state->scaling)
6206 		return true;
6207 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6208 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6209 			return true;
6210 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6211 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6212 			return true;
6213 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6214 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6215 		return true;
6216 	return false;
6217 }
6218 
6219 #ifdef CONFIG_DRM_AMD_DC_HDCP
6220 static bool is_content_protection_different(struct drm_connector_state *state,
6221 					    const struct drm_connector_state *old_state,
6222 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6223 {
6224 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6225 
6226 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6227 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6228 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6229 		return true;
6230 	}
6231 
	/* CP is being re-enabled; ignore this. */
6233 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6234 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6235 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6236 		return false;
6237 	}
6238 
	/* S3 resume case: the old state is always 0 (UNDESIRED) while the restored state will be ENABLED */
6240 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6241 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6242 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6243 
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected or enabled (hot-plug, headless S3, dpms).
	 */
6247 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6248 	    aconnector->dc_sink != NULL)
6249 		return true;
6250 
6251 	if (old_state->content_protection == state->content_protection)
6252 		return false;
6253 
6254 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6255 		return true;
6256 
6257 	return false;
6258 }
6259 
6260 #endif
6261 static void remove_stream(struct amdgpu_device *adev,
6262 			  struct amdgpu_crtc *acrtc,
6263 			  struct dc_stream_state *stream)
6264 {
	/* This is the update-mode case: the stream is detached from the CRTC. */
6266 
6267 	acrtc->otg_inst = -1;
6268 	acrtc->enabled = false;
6269 }
6270 
6271 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6272 			       struct dc_cursor_position *position)
6273 {
6274 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6275 	int x, y;
6276 	int xorigin = 0, yorigin = 0;
6277 
6278 	position->enable = false;
6279 	position->x = 0;
6280 	position->y = 0;
6281 
6282 	if (!crtc || !plane->state->fb)
6283 		return 0;
6284 
6285 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6286 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6287 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6288 			  __func__,
6289 			  plane->state->crtc_w,
6290 			  plane->state->crtc_h);
6291 		return -EINVAL;
6292 	}
6293 
6294 	x = plane->state->crtc_x;
6295 	y = plane->state->crtc_y;
6296 
6297 	if (x <= -amdgpu_crtc->max_cursor_width ||
6298 	    y <= -amdgpu_crtc->max_cursor_height)
6299 		return 0;
6300 
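	/*
	 * If the cursor hangs off the top or left edge, clamp its position
	 * to the edge and shift the hotspot by the clipped amount instead.
	 */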
6301 	if (x < 0) {
6302 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6303 		x = 0;
6304 	}
6305 	if (y < 0) {
6306 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6307 		y = 0;
6308 	}
6309 	position->enable = true;
6310 	position->translate_by_source = true;
6311 	position->x = x;
6312 	position->y = y;
6313 	position->x_hotspot = xorigin;
6314 	position->y_hotspot = yorigin;
6315 
6316 	return 0;
6317 }
6318 
6319 static void handle_cursor_update(struct drm_plane *plane,
6320 				 struct drm_plane_state *old_plane_state)
6321 {
6322 	struct amdgpu_device *adev = plane->dev->dev_private;
6323 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6324 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6325 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6326 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6327 	uint64_t address = afb ? afb->address : 0;
6328 	struct dc_cursor_position position;
6329 	struct dc_cursor_attributes attributes;
6330 	int ret;
6331 
6332 	if (!plane->state->fb && !old_plane_state->fb)
6333 		return;
6334 
6335 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6336 			 __func__,
6337 			 amdgpu_crtc->crtc_id,
6338 			 plane->state->crtc_w,
6339 			 plane->state->crtc_h);
6340 
6341 	ret = get_cursor_position(plane, crtc, &position);
6342 	if (ret)
6343 		return;
6344 
6345 	if (!position.enable) {
6346 		/* turn off cursor */
6347 		if (crtc_state && crtc_state->stream) {
6348 			mutex_lock(&adev->dm.dc_lock);
6349 			dc_stream_set_cursor_position(crtc_state->stream,
6350 						      &position);
6351 			mutex_unlock(&adev->dm.dc_lock);
6352 		}
6353 		return;
6354 	}
6355 
6356 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6357 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6358 
6359 	memset(&attributes, 0, sizeof(attributes));
6360 	attributes.address.high_part = upper_32_bits(address);
6361 	attributes.address.low_part  = lower_32_bits(address);
6362 	attributes.width             = plane->state->crtc_w;
6363 	attributes.height            = plane->state->crtc_h;
6364 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6365 	attributes.rotation_angle    = 0;
6366 	attributes.attribute_flags.value = 0;
6367 
6368 	attributes.pitch = attributes.width;
6369 
6370 	if (crtc_state->stream) {
6371 		mutex_lock(&adev->dm.dc_lock);
6372 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6373 							 &attributes))
6374 			DRM_ERROR("DC failed to set cursor attributes\n");
6375 
6376 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6377 						   &position))
6378 			DRM_ERROR("DC failed to set cursor position\n");
6379 		mutex_unlock(&adev->dm.dc_lock);
6380 	}
6381 }
6382 
6383 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6384 {
6385 
6386 	assert_spin_locked(&acrtc->base.dev->event_lock);
6387 	WARN_ON(acrtc->event);
6388 
6389 	acrtc->event = acrtc->base.state->event;
6390 
6391 	/* Set the flip status */
6392 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6393 
6394 	/* Mark this event as consumed */
6395 	acrtc->base.state->event = NULL;
6396 
6397 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6398 						 acrtc->crtc_id);
6399 }
6400 
6401 static void update_freesync_state_on_stream(
6402 	struct amdgpu_display_manager *dm,
6403 	struct dm_crtc_state *new_crtc_state,
6404 	struct dc_stream_state *new_stream,
6405 	struct dc_plane_state *surface,
6406 	u32 flip_timestamp_in_us)
6407 {
6408 	struct mod_vrr_params vrr_params;
6409 	struct dc_info_packet vrr_infopacket = {0};
6410 	struct amdgpu_device *adev = dm->adev;
6411 	unsigned long flags;
6412 
6413 	if (!new_stream)
6414 		return;
6415 
6416 	/*
6417 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6418 	 * For now it's sufficient to just guard against these conditions.
6419 	 */
6420 
6421 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6422 		return;
6423 
6424 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
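	/*
	 * vrr_params is protected by the event lock: work on a local copy
	 * and write it back before unlocking.
	 */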
6425 	vrr_params = new_crtc_state->vrr_params;
6426 
6427 	if (surface) {
6428 		mod_freesync_handle_preflip(
6429 			dm->freesync_module,
6430 			surface,
6431 			new_stream,
6432 			flip_timestamp_in_us,
6433 			&vrr_params);
6434 
6435 		if (adev->family < AMDGPU_FAMILY_AI &&
6436 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6437 			mod_freesync_handle_v_update(dm->freesync_module,
6438 						     new_stream, &vrr_params);
6439 
6440 			/* Need to call this before the frame ends. */
6441 			dc_stream_adjust_vmin_vmax(dm->dc,
6442 						   new_crtc_state->stream,
6443 						   &vrr_params.adjust);
6444 		}
6445 	}
6446 
6447 	mod_freesync_build_vrr_infopacket(
6448 		dm->freesync_module,
6449 		new_stream,
6450 		&vrr_params,
6451 		PACKET_TYPE_VRR,
6452 		TRANSFER_FUNC_UNKNOWN,
6453 		&vrr_infopacket);
6454 
6455 	new_crtc_state->freesync_timing_changed |=
6456 		(memcmp(&new_crtc_state->vrr_params.adjust,
6457 			&vrr_params.adjust,
6458 			sizeof(vrr_params.adjust)) != 0);
6459 
6460 	new_crtc_state->freesync_vrr_info_changed |=
6461 		(memcmp(&new_crtc_state->vrr_infopacket,
6462 			&vrr_infopacket,
6463 			sizeof(vrr_infopacket)) != 0);
6464 
6465 	new_crtc_state->vrr_params = vrr_params;
6466 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6467 
6468 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6469 	new_stream->vrr_infopacket = vrr_infopacket;
6470 
6471 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
6473 			      new_crtc_state->base.crtc->base.id,
6474 			      (int)new_crtc_state->base.vrr_enabled,
6475 			      (int)vrr_params.state);
6476 
6477 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6478 }
6479 
6480 static void pre_update_freesync_state_on_stream(
6481 	struct amdgpu_display_manager *dm,
6482 	struct dm_crtc_state *new_crtc_state)
6483 {
6484 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6485 	struct mod_vrr_params vrr_params;
6486 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6487 	struct amdgpu_device *adev = dm->adev;
6488 	unsigned long flags;
6489 
6490 	if (!new_stream)
6491 		return;
6492 
6493 	/*
6494 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6495 	 * For now it's sufficient to just guard against these conditions.
6496 	 */
6497 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6498 		return;
6499 
6500 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6501 	vrr_params = new_crtc_state->vrr_params;
6502 
6503 	if (new_crtc_state->vrr_supported &&
6504 	    config.min_refresh_in_uhz &&
6505 	    config.max_refresh_in_uhz) {
6506 		config.state = new_crtc_state->base.vrr_enabled ?
6507 			VRR_STATE_ACTIVE_VARIABLE :
6508 			VRR_STATE_INACTIVE;
6509 	} else {
6510 		config.state = VRR_STATE_UNSUPPORTED;
6511 	}
6512 
6513 	mod_freesync_build_vrr_params(dm->freesync_module,
6514 				      new_stream,
6515 				      &config, &vrr_params);
6516 
6517 	new_crtc_state->freesync_timing_changed |=
6518 		(memcmp(&new_crtc_state->vrr_params.adjust,
6519 			&vrr_params.adjust,
6520 			sizeof(vrr_params.adjust)) != 0);
6521 
6522 	new_crtc_state->vrr_params = vrr_params;
6523 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6524 }
6525 
6526 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6527 					    struct dm_crtc_state *new_state)
6528 {
6529 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6530 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6531 
6532 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a re-enable after a disable would compute bogus vblank/pflip
		 * timestamps if the re-enable happened inside the display
		 * front-porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at the end of vblank.
		 */
6541 		dm_set_vupdate_irq(new_state->base.crtc, true);
6542 		drm_crtc_vblank_get(new_state->base.crtc);
6543 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6544 				 __func__, new_state->base.crtc->base.id);
6545 	} else if (old_vrr_active && !new_vrr_active) {
6546 		/* Transition VRR active -> inactive:
6547 		 * Allow vblank irq disable again for fixed refresh rate.
6548 		 */
6549 		dm_set_vupdate_irq(new_state->base.crtc, false);
6550 		drm_crtc_vblank_put(new_state->base.crtc);
6551 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6552 				 __func__, new_state->base.crtc->base.id);
6553 	}
6554 }
6555 
6556 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6557 {
6558 	struct drm_plane *plane;
6559 	struct drm_plane_state *old_plane_state, *new_plane_state;
6560 	int i;
6561 
6562 	/*
6563 	 * TODO: Make this per-stream so we don't issue redundant updates for
6564 	 * commits with multiple streams.
6565 	 */
6566 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6567 				       new_plane_state, i)
6568 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6569 			handle_cursor_update(plane, old_plane_state);
6570 }
6571 
6572 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6573 				    struct dc_state *dc_state,
6574 				    struct drm_device *dev,
6575 				    struct amdgpu_display_manager *dm,
6576 				    struct drm_crtc *pcrtc,
6577 				    bool wait_for_vblank)
6578 {
6579 	uint32_t i;
6580 	uint64_t timestamp_ns;
6581 	struct drm_plane *plane;
6582 	struct drm_plane_state *old_plane_state, *new_plane_state;
6583 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6584 	struct drm_crtc_state *new_pcrtc_state =
6585 			drm_atomic_get_new_crtc_state(state, pcrtc);
6586 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6587 	struct dm_crtc_state *dm_old_crtc_state =
6588 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6589 	int planes_count = 0, vpos, hpos;
6590 	long r;
6591 	unsigned long flags;
6592 	struct amdgpu_bo *abo;
6593 	uint64_t tiling_flags;
6594 	uint32_t target_vblank, last_flip_vblank;
6595 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6596 	bool pflip_present = false;
6597 	struct {
6598 		struct dc_surface_update surface_updates[MAX_SURFACES];
6599 		struct dc_plane_info plane_infos[MAX_SURFACES];
6600 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6601 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6602 		struct dc_stream_update stream_update;
6603 	} *bundle;
6604 
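	/*
	 * The update bundle is too large to place on the stack, so allocate
	 * it from the heap for the duration of the commit.
	 */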
6605 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6606 
6607 	if (!bundle) {
6608 		dm_error("Failed to allocate update bundle\n");
6609 		goto cleanup;
6610 	}
6611 
6612 	/*
6613 	 * Disable the cursor first if we're disabling all the planes.
6614 	 * It'll remain on the screen after the planes are re-enabled
6615 	 * if we don't.
6616 	 */
6617 	if (acrtc_state->active_planes == 0)
6618 		amdgpu_dm_commit_cursors(state);
6619 
6620 	/* update planes when needed */
6621 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6622 		struct drm_crtc *crtc = new_plane_state->crtc;
6623 		struct drm_crtc_state *new_crtc_state;
6624 		struct drm_framebuffer *fb = new_plane_state->fb;
6625 		bool plane_needs_flip;
6626 		struct dc_plane_state *dc_plane;
6627 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6628 
6629 		/* Cursor plane is handled after stream updates */
6630 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6631 			continue;
6632 
6633 		if (!fb || !crtc || pcrtc != crtc)
6634 			continue;
6635 
6636 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6637 		if (!new_crtc_state->active)
6638 			continue;
6639 
6640 		dc_plane = dm_new_plane_state->dc_state;
6641 
6642 		bundle->surface_updates[planes_count].surface = dc_plane;
6643 		if (new_pcrtc_state->color_mgmt_changed) {
6644 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6645 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6646 		}
6647 
6648 		fill_dc_scaling_info(new_plane_state,
6649 				     &bundle->scaling_infos[planes_count]);
6650 
6651 		bundle->surface_updates[planes_count].scaling_info =
6652 			&bundle->scaling_infos[planes_count];
6653 
6654 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6655 
6656 		pflip_present = pflip_present || plane_needs_flip;
6657 
6658 		if (!plane_needs_flip) {
6659 			planes_count += 1;
6660 			continue;
6661 		}
6662 
6663 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6664 
6665 		/*
6666 		 * Wait for all fences on this FB. Do limited wait to avoid
6667 		 * deadlock during GPU reset when this fence will not signal
6668 		 * but we hold reservation lock for the BO.
6669 		 */
6670 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6671 							false,
6672 							msecs_to_jiffies(5000));
6673 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
6675 
		/*
		 * TODO: This might fail and hence is better not used; wait
		 * explicitly on the fences instead. In general this should be
		 * done for blocking commits, as per the framework helpers.
		 */
6682 		r = amdgpu_bo_reserve(abo, true);
6683 		if (unlikely(r != 0))
6684 			DRM_ERROR("failed to reserve buffer before flip\n");
6685 
6686 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6687 
6688 		amdgpu_bo_unreserve(abo);
6689 
6690 		fill_dc_plane_info_and_addr(
6691 			dm->adev, new_plane_state, tiling_flags,
6692 			&bundle->plane_infos[planes_count],
6693 			&bundle->flip_addrs[planes_count].address,
6694 			false);
6695 
6696 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6697 				 new_plane_state->plane->index,
6698 				 bundle->plane_infos[planes_count].dcc.enable);
6699 
6700 		bundle->surface_updates[planes_count].plane_info =
6701 			&bundle->plane_infos[planes_count];
6702 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change the FB pitch, DCC state, rotation or mirroring.
		 */
6707 		bundle->flip_addrs[planes_count].flip_immediate =
6708 			crtc->state->async_flip &&
6709 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6710 
6711 		timestamp_ns = ktime_get_ns();
6712 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6713 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6714 		bundle->surface_updates[planes_count].surface = dc_plane;
6715 
6716 		if (!bundle->surface_updates[planes_count].surface) {
6717 			DRM_ERROR("No surface for CRTC: id=%d\n",
6718 					acrtc_attach->crtc_id);
6719 			continue;
6720 		}
6721 
6722 		if (plane == pcrtc->primary)
6723 			update_freesync_state_on_stream(
6724 				dm,
6725 				acrtc_state,
6726 				acrtc_state->stream,
6727 				dc_plane,
6728 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6729 
6730 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6731 				 __func__,
6732 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6733 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6734 
6735 		planes_count += 1;
6736 
6737 	}
6738 
6739 	if (pflip_present) {
6740 		if (!vrr_active) {
6741 			/* Use old throttling in non-vrr fixed refresh rate mode
6742 			 * to keep flip scheduling based on target vblank counts
6743 			 * working in a backwards compatible way, e.g., for
6744 			 * clients using the GLX_OML_sync_control extension or
6745 			 * DRI3/Present extension with defined target_msc.
6746 			 */
6747 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
6750 			/* For variable refresh rate mode only:
6751 			 * Get vblank of last completed flip to avoid > 1 vrr
6752 			 * flips per video frame by use of throttling, but allow
6753 			 * flip programming anywhere in the possibly large
6754 			 * variable vrr vblank interval for fine-grained flip
6755 			 * timing control and more opportunity to avoid stutter
6756 			 * on late submission of flips.
6757 			 */
6758 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6759 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6760 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6761 		}
6762 
6763 		target_vblank = last_flip_vblank + wait_for_vblank;
6764 
6765 		/*
6766 		 * Wait until we're out of the vertical blank period before the one
6767 		 * targeted by the flip
6768 		 */
6769 		while ((acrtc_attach->enabled &&
6770 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6771 							    0, &vpos, &hpos, NULL,
6772 							    NULL, &pcrtc->hwmode)
6773 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6774 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6775 			(int)(target_vblank -
6776 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6777 			usleep_range(1000, 1100);
6778 		}
6779 
6780 		if (acrtc_attach->base.state->event) {
6781 			drm_crtc_vblank_get(pcrtc);
6782 
6783 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6784 
6785 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6786 			prepare_flip_isr(acrtc_attach);
6787 
6788 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6789 		}
6790 
6791 		if (acrtc_state->stream) {
6792 			if (acrtc_state->freesync_vrr_info_changed)
6793 				bundle->stream_update.vrr_infopacket =
6794 					&acrtc_state->stream->vrr_infopacket;
6795 		}
6796 	}
6797 
6798 	/* Update the planes if changed or disable if we don't have any. */
6799 	if ((planes_count || acrtc_state->active_planes == 0) &&
6800 		acrtc_state->stream) {
6801 		bundle->stream_update.stream = acrtc_state->stream;
6802 		if (new_pcrtc_state->mode_changed) {
6803 			bundle->stream_update.src = acrtc_state->stream->src;
6804 			bundle->stream_update.dst = acrtc_state->stream->dst;
6805 		}
6806 
6807 		if (new_pcrtc_state->color_mgmt_changed) {
6808 			/*
6809 			 * TODO: This isn't fully correct since we've actually
6810 			 * already modified the stream in place.
6811 			 */
6812 			bundle->stream_update.gamut_remap =
6813 				&acrtc_state->stream->gamut_remap_matrix;
6814 			bundle->stream_update.output_csc_transform =
6815 				&acrtc_state->stream->csc_color_matrix;
6816 			bundle->stream_update.out_transfer_func =
6817 				acrtc_state->stream->out_transfer_func;
6818 		}
6819 
6820 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6821 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6822 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6823 
6824 		/*
6825 		 * If FreeSync state on the stream has changed then we need to
6826 		 * re-adjust the min/max bounds now that DC doesn't handle this
6827 		 * as part of commit.
6828 		 */
6829 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6830 		    amdgpu_dm_vrr_active(acrtc_state)) {
6831 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6832 			dc_stream_adjust_vmin_vmax(
6833 				dm->dc, acrtc_state->stream,
6834 				&acrtc_state->vrr_params.adjust);
6835 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6836 		}
6837 		mutex_lock(&dm->dc_lock);
6838 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6839 				acrtc_state->stream->link->psr_allow_active)
6840 			amdgpu_dm_psr_disable(acrtc_state->stream);
6841 
6842 		dc_commit_updates_for_stream(dm->dc,
6843 						     bundle->surface_updates,
6844 						     planes_count,
6845 						     acrtc_state->stream,
6846 						     &bundle->stream_update,
6847 						     dc_state);
6848 
6849 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6850 						acrtc_state->stream->psr_version &&
6851 						!acrtc_state->stream->link->psr_feature_enabled)
6852 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6853 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6854 						acrtc_state->stream->link->psr_feature_enabled &&
6855 						!acrtc_state->stream->link->psr_allow_active) {
6856 			amdgpu_dm_psr_enable(acrtc_state->stream);
6857 		}
6858 
6859 		mutex_unlock(&dm->dc_lock);
6860 	}
6861 
6862 	/*
6863 	 * Update cursor state *after* programming all the planes.
6864 	 * This avoids redundant programming in the case where we're going
6865 	 * to be disabling a single plane - those pipes are being disabled.
6866 	 */
6867 	if (acrtc_state->active_planes)
6868 		amdgpu_dm_commit_cursors(state);
6869 
6870 cleanup:
6871 	kfree(bundle);
6872 }
6873 
6874 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6875 				   struct drm_atomic_state *state)
6876 {
6877 	struct amdgpu_device *adev = dev->dev_private;
6878 	struct amdgpu_dm_connector *aconnector;
6879 	struct drm_connector *connector;
6880 	struct drm_connector_state *old_con_state, *new_con_state;
6881 	struct drm_crtc_state *new_crtc_state;
6882 	struct dm_crtc_state *new_dm_crtc_state;
6883 	const struct dc_stream_status *status;
6884 	int i, inst;
6885 
6886 	/* Notify device removals. */
6887 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6888 		if (old_con_state->crtc != new_con_state->crtc) {
6889 			/* CRTC changes require notification. */
6890 			goto notify;
6891 		}
6892 
6893 		if (!new_con_state->crtc)
6894 			continue;
6895 
6896 		new_crtc_state = drm_atomic_get_new_crtc_state(
6897 			state, new_con_state->crtc);
6898 
6899 		if (!new_crtc_state)
6900 			continue;
6901 
6902 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6903 			continue;
6904 
6905 	notify:
6906 		aconnector = to_amdgpu_dm_connector(connector);
6907 
6908 		mutex_lock(&adev->dm.audio_lock);
6909 		inst = aconnector->audio_inst;
6910 		aconnector->audio_inst = -1;
6911 		mutex_unlock(&adev->dm.audio_lock);
6912 
6913 		amdgpu_dm_audio_eld_notify(adev, inst);
6914 	}
6915 
6916 	/* Notify audio device additions. */
6917 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6918 		if (!new_con_state->crtc)
6919 			continue;
6920 
6921 		new_crtc_state = drm_atomic_get_new_crtc_state(
6922 			state, new_con_state->crtc);
6923 
6924 		if (!new_crtc_state)
6925 			continue;
6926 
6927 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6928 			continue;
6929 
6930 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6931 		if (!new_dm_crtc_state->stream)
6932 			continue;
6933 
6934 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6935 		if (!status)
6936 			continue;
6937 
6938 		aconnector = to_amdgpu_dm_connector(connector);
6939 
6940 		mutex_lock(&adev->dm.audio_lock);
6941 		inst = status->audio_inst;
6942 		aconnector->audio_inst = inst;
6943 		mutex_unlock(&adev->dm.audio_lock);
6944 
6945 		amdgpu_dm_audio_eld_notify(adev, inst);
6946 	}
6947 }
6948 
6949 /*
6950  * Enable interrupts on CRTCs that are newly active, have undergone
6951  * a modeset, or have active planes again.
6952  *
6953  * Done in two passes, based on the for_modeset flag:
6954  * Pass 1: For CRTCs going through modeset
6955  * Pass 2: For CRTCs going from 0 to n active planes
6956  *
6957  * Interrupts can only be enabled after the planes are programmed,
6958  * so this requires a two-pass approach since we don't want to
6959  * just defer the interrupts until after commit planes every time.
6960  */
6961 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6962 					     struct drm_atomic_state *state,
6963 					     bool for_modeset)
6964 {
6965 	struct amdgpu_device *adev = dev->dev_private;
6966 	struct drm_crtc *crtc;
6967 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6968 	int i;
6969 #ifdef CONFIG_DEBUG_FS
6970 	enum amdgpu_dm_pipe_crc_source source;
6971 #endif
6972 
6973 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6974 				      new_crtc_state, i) {
6975 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6976 		struct dm_crtc_state *dm_new_crtc_state =
6977 			to_dm_crtc_state(new_crtc_state);
6978 		struct dm_crtc_state *dm_old_crtc_state =
6979 			to_dm_crtc_state(old_crtc_state);
6980 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6981 		bool run_pass;
6982 
6983 		run_pass = (for_modeset && modeset) ||
6984 			   (!for_modeset && !modeset &&
6985 			    !dm_old_crtc_state->interrupts_enabled);
6986 
6987 		if (!run_pass)
6988 			continue;
6989 
6990 		if (!dm_new_crtc_state->interrupts_enabled)
6991 			continue;
6992 
6993 		manage_dm_interrupts(adev, acrtc, true);
6994 
6995 #ifdef CONFIG_DEBUG_FS
6996 		/* The stream has changed so CRC capture needs to be re-enabled. */
6997 		source = dm_new_crtc_state->crc_src;
6998 		if (amdgpu_dm_is_valid_crc_source(source)) {
6999 			amdgpu_dm_crtc_configure_crc_source(
7000 				crtc, dm_new_crtc_state,
7001 				dm_new_crtc_state->crc_src);
7002 		}
7003 #endif
7004 	}
7005 }
7006 
7007 /*
7008  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7009  * @crtc_state: the DRM CRTC state
7010  * @stream_state: the DC stream state.
7011  *
7012  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7013  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7014  */
7015 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7016 						struct dc_stream_state *stream_state)
7017 {
7018 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7019 }
7020 
7021 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7022 				   struct drm_atomic_state *state,
7023 				   bool nonblock)
7024 {
7025 	struct drm_crtc *crtc;
7026 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7027 	struct amdgpu_device *adev = dev->dev_private;
7028 	int i;
7029 
7030 	/*
7031 	 * We disable vblank and pflip interrupts on CRTCs that are undergoing
7032 	 * a modeset, are being disabled, or have no active planes.
7033 	 *
7034 	 * It's done in atomic commit rather than commit tail for now since
7035 	 * some of these interrupt handlers access the current CRTC state and
7036 	 * potentially the stream pointer itself.
7037 	 *
7038 	 * Since the atomic state is swapped within atomic commit and not within
7039 	 * commit tail, this would lead to the new state (that hasn't been
7040 	 * committed yet) being accessed from within the handlers.
7041 	 *
7042 	 * TODO: Fix this so we can do this in commit tail and not have to block
7043 	 * in atomic check.
7044 	 */
7045 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7046 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7047 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7048 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7049 
7050 		if (dm_old_crtc_state->interrupts_enabled &&
7051 		    (!dm_new_crtc_state->interrupts_enabled ||
7052 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7053 			manage_dm_interrupts(adev, acrtc, false);
7054 	}
7055 	/*
7056 	 * Add a check here for SoCs that support a hardware cursor plane, to
7057 	 * unset legacy_cursor_update
7058 	 */
7059 
7060 	return drm_atomic_helper_commit(dev, state, nonblock);
7061 
7062 	/* TODO: Handle EINTR, re-enable IRQ */
7063 }
7064 
7065 /**
7066  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7067  * @state: The atomic state to commit
7068  *
7069  * This will tell DC to commit the constructed DC state from atomic_check,
7070  * programming the hardware. Any failure here implies a hardware problem, since
7071  * atomic check should have filtered out anything non-kosher.
7072  */
7073 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7074 {
7075 	struct drm_device *dev = state->dev;
7076 	struct amdgpu_device *adev = dev->dev_private;
7077 	struct amdgpu_display_manager *dm = &adev->dm;
7078 	struct dm_atomic_state *dm_state;
7079 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7080 	uint32_t i, j;
7081 	struct drm_crtc *crtc;
7082 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7083 	unsigned long flags;
7084 	bool wait_for_vblank = true;
7085 	struct drm_connector *connector;
7086 	struct drm_connector_state *old_con_state, *new_con_state;
7087 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7088 	int crtc_disable_count = 0;
7089 
7090 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7091 
7092 	dm_state = dm_atomic_get_new_state(state);
7093 	if (dm_state && dm_state->context) {
7094 		dc_state = dm_state->context;
7095 	} else {
7096 		/* No state changes, retain current state. */
7097 		dc_state_temp = dc_create_state(dm->dc);
7098 		ASSERT(dc_state_temp);
7099 		dc_state = dc_state_temp;
7100 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7101 	}
7102 
7103 	/* update changed items */
7104 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7105 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7106 
7107 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7108 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7109 
7110 		DRM_DEBUG_DRIVER(
7111 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7112 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7113 			"connectors_changed:%d\n",
7114 			acrtc->crtc_id,
7115 			new_crtc_state->enable,
7116 			new_crtc_state->active,
7117 			new_crtc_state->planes_changed,
7118 			new_crtc_state->mode_changed,
7119 			new_crtc_state->active_changed,
7120 			new_crtc_state->connectors_changed);
7121 
7122 		/* Copy all transient state flags into dc state */
7123 		if (dm_new_crtc_state->stream) {
7124 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7125 							    dm_new_crtc_state->stream);
7126 		}
7127 
7128 		/* Handles the headless hotplug case, updating new_state
7129 		 * and aconnector as needed.
7130 		 */
7131 
7132 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7133 
7134 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7135 
7136 			if (!dm_new_crtc_state->stream) {
7137 				/*
7138 				 * This could happen because of issues with
7139 				 * userspace notification delivery. In this
7140 				 * case userspace tries to set a mode on a
7141 				 * display which is in fact disconnected, so
7142 				 * dc_sink is NULL on the aconnector.
7143 				 * We expect a mode-reset commit to come soon.
7144 				 *
7145 				 * This can also happen when an unplug occurs
7146 				 * during the resume sequence.
7147 				 *
7148 				 * In either case, we want to pretend we still
7149 				 * have a sink to keep the pipe running so that
7150 				 * hw state stays consistent with the sw state.
7151 				 */
7152 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7153 						__func__, acrtc->base.base.id);
7154 				continue;
7155 			}
7156 
7157 			if (dm_old_crtc_state->stream)
7158 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7159 
7160 			pm_runtime_get_noresume(dev->dev);
7161 
7162 			acrtc->enabled = true;
7163 			acrtc->hw_mode = new_crtc_state->mode;
7164 			crtc->hwmode = new_crtc_state->mode;
7165 		} else if (modereset_required(new_crtc_state)) {
7166 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7167 			/* i.e. reset mode */
7168 			if (dm_old_crtc_state->stream) {
7169 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7170 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7171 
7172 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7173 			}
7174 		}
7175 	} /* for_each_crtc_in_state() */
7176 
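	/*
	 * Program the new (or retained) DC state to hardware. The stream
	 * and plane updates below are applied on top of this context.
	 */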
7177 	if (dc_state) {
7178 		dm_enable_per_frame_crtc_master_sync(dc_state);
7179 		mutex_lock(&dm->dc_lock);
7180 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7181 		mutex_unlock(&dm->dc_lock);
7182 	}
7183 
7184 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7185 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7186 
7187 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7188 
7189 		if (dm_new_crtc_state->stream != NULL) {
7190 			const struct dc_stream_status *status =
7191 					dc_stream_get_status(dm_new_crtc_state->stream);
7192 
7193 			if (!status)
7194 				status = dc_stream_get_status_from_state(dc_state,
7195 									 dm_new_crtc_state->stream);
7196 
7197 			if (!status)
7198 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7199 			else
7200 				acrtc->otg_inst = status->primary_otg_inst;
7201 		}
7202 	}
7203 #ifdef CONFIG_DRM_AMD_DC_HDCP
7204 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7205 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7206 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7207 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7208 
7209 		new_crtc_state = NULL;
7210 
7211 		if (acrtc)
7212 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7213 
7214 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7215 
7216 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7217 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7218 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7219 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7220 			continue;
7221 		}
7222 
7223 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7224 			hdcp_update_display(
7225 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7226 				new_con_state->hdcp_content_type,
7227 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7228 													 : false);
7229 	}
7230 #endif
7231 
7232 	/* Handle connector state changes */
7233 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7234 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7235 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7236 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7237 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7238 		struct dc_stream_update stream_update;
7239 		struct dc_info_packet hdr_packet;
7240 		struct dc_stream_status *status = NULL;
7241 		bool abm_changed, hdr_changed, scaling_changed;
7242 
7243 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7244 		memset(&stream_update, 0, sizeof(stream_update));
7245 
7246 		if (acrtc) {
7247 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7248 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7249 		}
7250 
7251 		/* Skip any modesets/resets */
7252 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7253 			continue;
7254 
7255 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7256 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7257 
7258 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7259 							     dm_old_con_state);
7260 
7261 		abm_changed = dm_new_crtc_state->abm_level !=
7262 			      dm_old_crtc_state->abm_level;
7263 
7264 		hdr_changed =
7265 			is_hdr_metadata_different(old_con_state, new_con_state);
7266 
7267 		if (!scaling_changed && !abm_changed && !hdr_changed)
7268 			continue;
7269 
7270 		stream_update.stream = dm_new_crtc_state->stream;
7271 		if (scaling_changed) {
7272 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7273 					dm_new_con_state, dm_new_crtc_state->stream);
7274 
7275 			stream_update.src = dm_new_crtc_state->stream->src;
7276 			stream_update.dst = dm_new_crtc_state->stream->dst;
7277 		}
7278 
7279 		if (abm_changed) {
7280 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7281 
7282 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7283 		}
7284 
7285 		if (hdr_changed) {
7286 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7287 			stream_update.hdr_static_metadata = &hdr_packet;
7288 		}
7289 
7290 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7291 		WARN_ON(!status);
7292 		WARN_ON(!status->plane_count);
7293 
7294 		/*
7295 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7296 		 * Here we create an empty update on each plane.
7297 		 * To fix this, DC should permit updating only stream properties.
7298 		 */
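		/*
		 * The dummy updates carry no surface changes, so every entry
		 * simply reuses plane_states[0] as its surface.
		 */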
7299 		for (j = 0; j < status->plane_count; j++)
7300 			dummy_updates[j].surface = status->plane_states[0];
7301 
7302 
7303 		mutex_lock(&dm->dc_lock);
7304 		dc_commit_updates_for_stream(dm->dc,
7305 						     dummy_updates,
7306 						     status->plane_count,
7307 						     dm_new_crtc_state->stream,
7308 						     &stream_update,
7309 						     dc_state);
7310 		mutex_unlock(&dm->dc_lock);
7311 	}
7312 
7313 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7314 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7315 				      new_crtc_state, i) {
7316 		if (old_crtc_state->active && !new_crtc_state->active)
7317 			crtc_disable_count++;
7318 
7319 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7320 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7321 
7322 		/* Update freesync active state. */
7323 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7324 
7325 		/* Handle vrr on->off / off->on transitions */
7326 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7327 						dm_new_crtc_state);
7328 	}
7329 
7330 	/* Enable interrupts for CRTCs going through a modeset. */
7331 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7332 
7333 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7334 		if (new_crtc_state->async_flip)
7335 			wait_for_vblank = false;
7336 
7337 	/* update planes when needed per crtc*/
7338 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7339 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7340 
7341 		if (dm_new_crtc_state->stream)
7342 			amdgpu_dm_commit_planes(state, dc_state, dev,
7343 						dm, crtc, wait_for_vblank);
7344 	}
7345 
7346 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7347 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7348 
7349 	/* Update audio instances for each connector. */
7350 	amdgpu_dm_commit_audio(dev, state);
7351 
7352 	/*
7353 	 * Send a vblank event for every CRTC whose event was not handled in
7354 	 * the flip path, and mark it consumed for drm_atomic_helper_commit_hw_done().
7355 	 */
7356 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7357 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7358 
7359 		if (new_crtc_state->event)
7360 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7361 
7362 		new_crtc_state->event = NULL;
7363 	}
7364 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7365 
7366 	/* Signal HW programming completion */
7367 	drm_atomic_helper_commit_hw_done(state);
7368 
7369 	if (wait_for_vblank)
7370 		drm_atomic_helper_wait_for_flip_done(dev, state);
7371 
7372 	drm_atomic_helper_cleanup_planes(dev, state);
7373 
7374 	/*
7375 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7376 	 * so we can put the GPU into runtime suspend if we're not driving any
7377 	 * displays anymore.
7378 	 */
7379 	for (i = 0; i < crtc_disable_count; i++)
7380 		pm_runtime_put_autosuspend(dev->dev);
7381 	pm_runtime_mark_last_busy(dev->dev);
7382 
7383 	if (dc_state_temp)
7384 		dc_release_state(dc_state_temp);
7385 }
7386 
7387 
7388 static int dm_force_atomic_commit(struct drm_connector *connector)
7389 {
7390 	int ret = 0;
7391 	struct drm_device *ddev = connector->dev;
7392 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7393 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7394 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7395 	struct drm_connector_state *conn_state;
7396 	struct drm_crtc_state *crtc_state;
7397 	struct drm_plane_state *plane_state;
7398 
7399 	if (!state)
7400 		return -ENOMEM;
7401 
7402 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7403 
7404 	/* Construct an atomic state to restore the previous display settings */
7405 
7406 	/*
7407 	 * Attach connectors to drm_atomic_state
7408 	 */
7409 	conn_state = drm_atomic_get_connector_state(state, connector);
7410 
7411 	ret = PTR_ERR_OR_ZERO(conn_state);
7412 	if (ret)
7413 		goto err;
7414 
7415 	/* Attach crtc to drm_atomic_state*/
7416 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7417 
7418 	ret = PTR_ERR_OR_ZERO(crtc_state);
7419 	if (ret)
7420 		goto err;
7421 
7422 	/* force a restore */
7423 	crtc_state->mode_changed = true;
7424 
7425 	/* Attach plane to drm_atomic_state */
7426 	plane_state = drm_atomic_get_plane_state(state, plane);
7427 
7428 	ret = PTR_ERR_OR_ZERO(plane_state);
7429 	if (ret)
7430 		goto err;
7431 
7432 
7433 	/* Call commit internally with the state we just constructed */
7434 	ret = drm_atomic_commit(state);
7435 	if (!ret)
7436 		return 0;
7437 
7438 err:
7439 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7440 	drm_atomic_state_put(state);
7441 
7442 	return ret;
7443 }
7444 
7445 /*
7446  * This function handles all cases when set mode does not come upon hotplug.
7447  * This includes when a display is unplugged and then plugged back into the
7448  * same port, and when running without usermode desktop manager support.
7449  */
7450 void dm_restore_drm_connector_state(struct drm_device *dev,
7451 				    struct drm_connector *connector)
7452 {
7453 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7454 	struct amdgpu_crtc *disconnected_acrtc;
7455 	struct dm_crtc_state *acrtc_state;
7456 
7457 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7458 		return;
7459 
7460 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7461 	if (!disconnected_acrtc)
7462 		return;
7463 
7464 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7465 	if (!acrtc_state->stream)
7466 		return;
7467 
7468 	/*
7469 	 * If the previous sink is not released and is different from the
7470 	 * current one, we deduce we cannot rely on a usermode call to turn
7471 	 * on the display, so we do it here.
7472 	 */
7473 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7474 		dm_force_atomic_commit(&aconnector->base);
7475 }
7476 
7477 /*
7478  * Grabs all modesetting locks to serialize against any blocking commits,
7479  * and waits for completion of all non-blocking commits.
7480  */
7481 static int do_acquire_global_lock(struct drm_device *dev,
7482 				 struct drm_atomic_state *state)
7483 {
7484 	struct drm_crtc *crtc;
7485 	struct drm_crtc_commit *commit;
7486 	long ret;
7487 
7488 	/*
7489 	 * Adding all modeset locks to acquire_ctx will ensure that when the
7490 	 * framework releases it, the extra locks we are taking here will
7491 	 * also get released.
7492 	 */
7493 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7494 	if (ret)
7495 		return ret;
7496 
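	/*
	 * For each CRTC, take a reference on the commit at the head of its
	 * commit list and wait for both hw_done and flip_done, each with a
	 * 10 second timeout, so a stuck commit cannot block this path forever.
	 */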
7497 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7498 		spin_lock(&crtc->commit_lock);
7499 		commit = list_first_entry_or_null(&crtc->commit_list,
7500 				struct drm_crtc_commit, commit_entry);
7501 		if (commit)
7502 			drm_crtc_commit_get(commit);
7503 		spin_unlock(&crtc->commit_lock);
7504 
7505 		if (!commit)
7506 			continue;
7507 
7508 		/*
7509 		 * Make sure all pending HW programming has completed and all
7510 		 * page flips are done.
7511 		 */
7512 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7513 
7514 		if (ret > 0)
7515 			ret = wait_for_completion_interruptible_timeout(
7516 					&commit->flip_done, 10*HZ);
7517 
7518 		if (ret == 0)
7519 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7520 				  "timed out\n", crtc->base.id, crtc->name);
7521 
7522 		drm_crtc_commit_put(commit);
7523 	}
7524 
7525 	return ret < 0 ? ret : 0;
7526 }
7527 
7528 static void get_freesync_config_for_crtc(
7529 	struct dm_crtc_state *new_crtc_state,
7530 	struct dm_connector_state *new_con_state)
7531 {
7532 	struct mod_freesync_config config = {0};
7533 	struct amdgpu_dm_connector *aconnector =
7534 			to_amdgpu_dm_connector(new_con_state->base.connector);
7535 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7536 	int vrefresh = drm_mode_vrefresh(mode);
7537 
7538 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7539 					vrefresh >= aconnector->min_vfreq &&
7540 					vrefresh <= aconnector->max_vfreq;
7541 
7542 	if (new_crtc_state->vrr_supported) {
7543 		new_crtc_state->stream->ignore_msa_timing_param = true;
7544 		config.state = new_crtc_state->base.vrr_enabled ?
7545 				VRR_STATE_ACTIVE_VARIABLE :
7546 				VRR_STATE_INACTIVE;
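		/* DC expects the refresh-rate bounds in micro-Hz. */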
7547 		config.min_refresh_in_uhz =
7548 				aconnector->min_vfreq * 1000000;
7549 		config.max_refresh_in_uhz =
7550 				aconnector->max_vfreq * 1000000;
7551 		config.vsif_supported = true;
7552 		config.btr = true;
7553 	}
7554 
7555 	new_crtc_state->freesync_config = config;
7556 }
7557 
7558 static void reset_freesync_config_for_crtc(
7559 	struct dm_crtc_state *new_crtc_state)
7560 {
7561 	new_crtc_state->vrr_supported = false;
7562 
7563 	memset(&new_crtc_state->vrr_params, 0,
7564 	       sizeof(new_crtc_state->vrr_params));
7565 	memset(&new_crtc_state->vrr_infopacket, 0,
7566 	       sizeof(new_crtc_state->vrr_infopacket));
7567 }
7568 
7569 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7570 				struct drm_atomic_state *state,
7571 				struct drm_crtc *crtc,
7572 				struct drm_crtc_state *old_crtc_state,
7573 				struct drm_crtc_state *new_crtc_state,
7574 				bool enable,
7575 				bool *lock_and_validation_needed)
7576 {
7577 	struct dm_atomic_state *dm_state = NULL;
7578 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7579 	struct dc_stream_state *new_stream;
7580 	int ret = 0;
7581 
7582 	/*
7583 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7584 	 * dc_validation_set. Update changed items.
7585 	 */
7586 	struct amdgpu_crtc *acrtc = NULL;
7587 	struct amdgpu_dm_connector *aconnector = NULL;
7588 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7589 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7590 
7591 	new_stream = NULL;
7592 
7593 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7594 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7595 	acrtc = to_amdgpu_crtc(crtc);
7596 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7597 
7598 	/* TODO This hack should go away */
7599 	if (aconnector && enable) {
7600 		/* Make sure fake sink is created in plug-in scenario */
7601 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7602 							    &aconnector->base);
7603 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7604 							    &aconnector->base);
7605 
7606 		if (IS_ERR(drm_new_conn_state)) {
7607 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7608 			goto fail;
7609 		}
7610 
7611 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7612 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7613 
7614 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7615 			goto skip_modeset;
7616 
7617 		new_stream = create_stream_for_sink(aconnector,
7618 						     &new_crtc_state->mode,
7619 						    dm_new_conn_state,
7620 						    dm_old_crtc_state->stream);
7621 
7622 		/*
7623 		 * We can have no stream on ACTION_SET if a display
7624 		 * was disconnected during S3; in this case it is not an
7625 		 * error, the OS will be updated after detection and
7626 		 * will do the right thing on the next atomic commit.
7627 		 */
7628 
7629 		if (!new_stream) {
7630 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7631 					__func__, acrtc->base.base.id);
7632 			ret = -ENOMEM;
7633 			goto fail;
7634 		}
7635 
7636 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7637 
7638 		ret = fill_hdr_info_packet(drm_new_conn_state,
7639 					   &new_stream->hdr_static_metadata);
7640 		if (ret)
7641 			goto fail;
7642 
7643 		/*
7644 		 * If we already removed the old stream from the context
7645 		 * (and set the new stream to NULL) then we can't reuse
7646 		 * the old stream even if the stream and scaling are unchanged.
7647 		 * We'll hit the BUG_ON and black screen.
7648 		 *
7649 		 * TODO: Refactor this function to allow this check to work
7650 		 * in all conditions.
7651 		 */
7652 		if (dm_new_crtc_state->stream &&
7653 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7654 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7655 			new_crtc_state->mode_changed = false;
7656 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7657 					 new_crtc_state->mode_changed);
7658 		}
7659 	}
7660 
7661 	/* mode_changed flag may get updated above, need to check again */
7662 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7663 		goto skip_modeset;
7664 
7665 	DRM_DEBUG_DRIVER(
7666 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7667 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7668 		"connectors_changed:%d\n",
7669 		acrtc->crtc_id,
7670 		new_crtc_state->enable,
7671 		new_crtc_state->active,
7672 		new_crtc_state->planes_changed,
7673 		new_crtc_state->mode_changed,
7674 		new_crtc_state->active_changed,
7675 		new_crtc_state->connectors_changed);
7676 
7677 	/* Remove stream for any changed/disabled CRTC */
7678 	if (!enable) {
7679 
7680 		if (!dm_old_crtc_state->stream)
7681 			goto skip_modeset;
7682 
7683 		ret = dm_atomic_get_state(state, &dm_state);
7684 		if (ret)
7685 			goto fail;
7686 
7687 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7688 				crtc->base.id);
7689 
7690 		/* i.e. reset mode */
7691 		if (dc_remove_stream_from_ctx(
7692 				dm->dc,
7693 				dm_state->context,
7694 				dm_old_crtc_state->stream) != DC_OK) {
7695 			ret = -EINVAL;
7696 			goto fail;
7697 		}
7698 
7699 		dc_stream_release(dm_old_crtc_state->stream);
7700 		dm_new_crtc_state->stream = NULL;
7701 
7702 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7703 
7704 		*lock_and_validation_needed = true;
7705 
7706 	} else {/* Add stream for any updated/enabled CRTC */
7707 		/*
7708 		 * Quick fix to prevent a NULL pointer on new_stream when added
7709 		 * MST connectors are not found in the existing crtc_state in chained mode.
7710 		 * TODO: need to dig out the root cause of that.
7711 		 */
7712 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7713 			goto skip_modeset;
7714 
7715 		if (modereset_required(new_crtc_state))
7716 			goto skip_modeset;
7717 
7718 		if (modeset_required(new_crtc_state, new_stream,
7719 				     dm_old_crtc_state->stream)) {
7720 
7721 			WARN_ON(dm_new_crtc_state->stream);
7722 
7723 			ret = dm_atomic_get_state(state, &dm_state);
7724 			if (ret)
7725 				goto fail;
7726 
7727 			dm_new_crtc_state->stream = new_stream;
7728 
7729 			dc_stream_retain(new_stream);
7730 
7731 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7732 						crtc->base.id);
7733 
7734 			if (dc_add_stream_to_ctx(
7735 					dm->dc,
7736 					dm_state->context,
7737 					dm_new_crtc_state->stream) != DC_OK) {
7738 				ret = -EINVAL;
7739 				goto fail;
7740 			}
7741 
7742 			*lock_and_validation_needed = true;
7743 		}
7744 	}
7745 
7746 skip_modeset:
7747 	/* Release extra reference */
7748 	if (new_stream)
7749 		dc_stream_release(new_stream);
7750 
7751 	/*
7752 	 * We want to do dc stream updates that do not require a
7753 	 * full modeset below.
7754 	 */
7755 	if (!(enable && aconnector && new_crtc_state->enable &&
7756 	      new_crtc_state->active))
7757 		return 0;
7758 	/*
7759 	 * Given the above conditions, the dc stream state cannot be NULL,
7760 	 * because the CRTC:
7761 	 * 1. Is being enabled (its stream has just been added to the dc
7762 	 *    context, or is already on the context),
7763 	 * 2. Has a valid connector attached, and
7764 	 * 3. Is currently active and enabled.
7765 	 */
7766 	BUG_ON(dm_new_crtc_state->stream == NULL);
7767 
7768 	/* Scaling or underscan settings */
7769 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7770 		update_stream_scaling_settings(
7771 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7772 
7773 	/* ABM settings */
7774 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7775 
7776 	/*
7777 	 * Color management settings. We also update color properties
7778 	 * when a modeset is needed, to ensure it gets reprogrammed.
7779 	 */
7780 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7781 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7782 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7783 		if (ret)
7784 			goto fail;
7785 	}
7786 
7787 	/* Update Freesync settings. */
7788 	get_freesync_config_for_crtc(dm_new_crtc_state,
7789 				     dm_new_conn_state);
7790 
7791 	return ret;
7792 
7793 fail:
7794 	if (new_stream)
7795 		dc_stream_release(new_stream);
7796 	return ret;
7797 }
7798 
7799 static bool should_reset_plane(struct drm_atomic_state *state,
7800 			       struct drm_plane *plane,
7801 			       struct drm_plane_state *old_plane_state,
7802 			       struct drm_plane_state *new_plane_state)
7803 {
7804 	struct drm_plane *other;
7805 	struct drm_plane_state *old_other_state, *new_other_state;
7806 	struct drm_crtc_state *new_crtc_state;
7807 	int i;
7808 
7809 	/*
7810 	 * TODO: Remove this hack once the checks below are sufficient
7811 	 * to determine when we need to reset all the planes on
7812 	 * the stream.
7813 	 */
7814 	if (state->allow_modeset)
7815 		return true;
7816 
7817 	/* Exit early if we know that we're adding or removing the plane. */
7818 	if (old_plane_state->crtc != new_plane_state->crtc)
7819 		return true;
7820 
7821 	/* old crtc == new_crtc == NULL, plane not in context. */
7822 	if (!new_plane_state->crtc)
7823 		return false;
7824 
7825 	new_crtc_state =
7826 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7827 
7828 	if (!new_crtc_state)
7829 		return true;
7830 
7831 	/* CRTC Degamma changes currently require us to recreate planes. */
7832 	if (new_crtc_state->color_mgmt_changed)
7833 		return true;
7834 
7835 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7836 		return true;
7837 
7838 	/*
7839 	 * If there are any new primary or overlay planes being added or
7840 	 * removed then the z-order can potentially change. To ensure
7841 	 * correct z-order and pipe acquisition the current DC architecture
7842 	 * requires us to remove and recreate all existing planes.
7843 	 *
7844 	 * TODO: Come up with a more elegant solution for this.
7845 	 */
7846 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7847 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7848 			continue;
7849 
7850 		if (old_other_state->crtc != new_plane_state->crtc &&
7851 		    new_other_state->crtc != new_plane_state->crtc)
7852 			continue;
7853 
7854 		if (old_other_state->crtc != new_other_state->crtc)
7855 			return true;
7856 
7857 		/* TODO: Remove this once we can handle fast format changes. */
7858 		if (old_other_state->fb && new_other_state->fb &&
7859 		    old_other_state->fb->format != new_other_state->fb->format)
7860 			return true;
7861 	}
7862 
7863 	return false;
7864 }
7865 
7866 static int dm_update_plane_state(struct dc *dc,
7867 				 struct drm_atomic_state *state,
7868 				 struct drm_plane *plane,
7869 				 struct drm_plane_state *old_plane_state,
7870 				 struct drm_plane_state *new_plane_state,
7871 				 bool enable,
7872 				 bool *lock_and_validation_needed)
7873 {
7874 
7875 	struct dm_atomic_state *dm_state = NULL;
7876 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7877 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7878 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7879 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7880 	bool needs_reset;
7881 	int ret = 0;
7882 
7883 
7884 	new_plane_crtc = new_plane_state->crtc;
7885 	old_plane_crtc = old_plane_state->crtc;
7886 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7887 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7888 
7889 	/* TODO: Implement atomic check for the cursor plane */
7890 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
7891 		return 0;
7892 
7893 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7894 					 new_plane_state);
7895 
7896 	/* Remove any changed/removed planes */
7897 	if (!enable) {
7898 		if (!needs_reset)
7899 			return 0;
7900 
7901 		if (!old_plane_crtc)
7902 			return 0;
7903 
7904 		old_crtc_state = drm_atomic_get_old_crtc_state(
7905 				state, old_plane_crtc);
7906 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7907 
7908 		if (!dm_old_crtc_state->stream)
7909 			return 0;
7910 
7911 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7912 				plane->base.id, old_plane_crtc->base.id);
7913 
7914 		ret = dm_atomic_get_state(state, &dm_state);
7915 		if (ret)
7916 			return ret;
7917 
7918 		if (!dc_remove_plane_from_context(
7919 				dc,
7920 				dm_old_crtc_state->stream,
7921 				dm_old_plane_state->dc_state,
7922 				dm_state->context)) {
7923 
7924 			ret = -EINVAL;
7925 			return ret;
7926 		}
7927 
7928 
7929 		dc_plane_state_release(dm_old_plane_state->dc_state);
7930 		dm_new_plane_state->dc_state = NULL;
7931 
7932 		*lock_and_validation_needed = true;
7933 
7934 	} else { /* Add new planes */
7935 		struct dc_plane_state *dc_new_plane_state;
7936 
7937 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7938 			return 0;
7939 
7940 		if (!new_plane_crtc)
7941 			return 0;
7942 
7943 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7945 
7946 		if (!dm_new_crtc_state->stream)
7947 			return 0;
7948 
7949 		if (!needs_reset)
7950 			return 0;
7951 
7952 		WARN_ON(dm_new_plane_state->dc_state);
7953 
7954 		dc_new_plane_state = dc_create_plane_state(dc);
7955 		if (!dc_new_plane_state)
7956 			return -ENOMEM;
7957 
7958 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7959 				plane->base.id, new_plane_crtc->base.id);
7960 
7961 		ret = fill_dc_plane_attributes(
7962 			new_plane_crtc->dev->dev_private,
7963 			dc_new_plane_state,
7964 			new_plane_state,
7965 			new_crtc_state);
7966 		if (ret) {
7967 			dc_plane_state_release(dc_new_plane_state);
7968 			return ret;
7969 		}
7970 
7971 		ret = dm_atomic_get_state(state, &dm_state);
7972 		if (ret) {
7973 			dc_plane_state_release(dc_new_plane_state);
7974 			return ret;
7975 		}
7976 
7977 		/*
7978 		 * Any atomic check errors that occur after this will
7979 		 * not need a release. The plane state will be attached
7980 		 * to the stream, and therefore part of the atomic
7981 		 * state. It'll be released when the atomic state is
7982 		 * cleaned.
7983 		 */
7984 		if (!dc_add_plane_to_context(
7985 				dc,
7986 				dm_new_crtc_state->stream,
7987 				dc_new_plane_state,
7988 				dm_state->context)) {
7989 
7990 			dc_plane_state_release(dc_new_plane_state);
7991 			return -EINVAL;
7992 		}
7993 
7994 		dm_new_plane_state->dc_state = dc_new_plane_state;
7995 
7996 		/* Tell DC to do a full surface update every time there
7997 		 * is a plane change. Inefficient, but works for now.
7998 		 */
7999 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8000 
8001 		*lock_and_validation_needed = true;
8002 	}
8003 
8004 
8005 	return ret;
8006 }
8007 
8008 static int
8009 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8010 				    struct drm_atomic_state *state,
8011 				    enum surface_update_type *out_type)
8012 {
8013 	struct dc *dc = dm->dc;
8014 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8015 	int i, j, num_plane, ret = 0;
8016 	struct drm_plane_state *old_plane_state, *new_plane_state;
8017 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8018 	struct drm_crtc *new_plane_crtc;
8019 	struct drm_plane *plane;
8020 
8021 	struct drm_crtc *crtc;
8022 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8023 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8024 	struct dc_stream_status *status = NULL;
8025 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8026 	struct surface_info_bundle {
8027 		struct dc_surface_update surface_updates[MAX_SURFACES];
8028 		struct dc_plane_info plane_infos[MAX_SURFACES];
8029 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8030 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8031 		struct dc_stream_update stream_update;
8032 	} *bundle;
8033 
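	/*
	 * The bundle aggregates several MAX_SURFACES-sized arrays and is too
	 * big to place on the kernel stack, so allocate it on the heap.
	 */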
8034 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8035 
8036 	if (!bundle) {
8037 		DRM_ERROR("Failed to allocate update bundle\n");
8038 		/* Set type to FULL to avoid crashing in DC */
8039 		update_type = UPDATE_TYPE_FULL;
8040 		goto cleanup;
8041 	}
8042 
8043 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8044 
8045 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8046 
8047 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8048 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8049 		num_plane = 0;
8050 
8051 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8052 			update_type = UPDATE_TYPE_FULL;
8053 			goto cleanup;
8054 		}
8055 
8056 		if (!new_dm_crtc_state->stream)
8057 			continue;
8058 
8059 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8060 			const struct amdgpu_framebuffer *amdgpu_fb =
8061 				to_amdgpu_framebuffer(new_plane_state->fb);
8062 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8063 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8064 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8065 			uint64_t tiling_flags;
8066 
8067 			new_plane_crtc = new_plane_state->crtc;
8068 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8069 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8070 
8071 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8072 				continue;
8073 
8074 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8075 				update_type = UPDATE_TYPE_FULL;
8076 				goto cleanup;
8077 			}
8078 
8079 			if (crtc != new_plane_crtc)
8080 				continue;
8081 
8082 			bundle->surface_updates[num_plane].surface =
8083 					new_dm_plane_state->dc_state;
8084 
8085 			if (new_crtc_state->mode_changed) {
8086 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8087 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8088 			}
8089 
8090 			if (new_crtc_state->color_mgmt_changed) {
8091 				bundle->surface_updates[num_plane].gamma =
8092 						new_dm_plane_state->dc_state->gamma_correction;
8093 				bundle->surface_updates[num_plane].in_transfer_func =
8094 						new_dm_plane_state->dc_state->in_transfer_func;
8095 				bundle->stream_update.gamut_remap =
8096 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8097 				bundle->stream_update.output_csc_transform =
8098 						&new_dm_crtc_state->stream->csc_color_matrix;
8099 				bundle->stream_update.out_transfer_func =
8100 						new_dm_crtc_state->stream->out_transfer_func;
8101 			}
8102 
8103 			ret = fill_dc_scaling_info(new_plane_state,
8104 						   scaling_info);
8105 			if (ret)
8106 				goto cleanup;
8107 
8108 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8109 
8110 			if (amdgpu_fb) {
8111 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8112 				if (ret)
8113 					goto cleanup;
8114 
8115 				ret = fill_dc_plane_info_and_addr(
8116 					dm->adev, new_plane_state, tiling_flags,
8117 					plane_info,
8118 					&flip_addr->address,
8119 					false);
8120 				if (ret)
8121 					goto cleanup;
8122 
8123 				bundle->surface_updates[num_plane].plane_info = plane_info;
8124 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8125 			}
8126 
8127 			num_plane++;
8128 		}
8129 
8130 		if (num_plane == 0)
8131 			continue;
8132 
8133 		ret = dm_atomic_get_state(state, &dm_state);
8134 		if (ret)
8135 			goto cleanup;
8136 
8137 		old_dm_state = dm_atomic_get_old_state(state);
8138 		if (!old_dm_state) {
8139 			ret = -EINVAL;
8140 			goto cleanup;
8141 		}
8142 
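		/*
		 * Look the stream status up in the *old* DC context: the
		 * update type is determined by comparing the new surface
		 * updates against what is currently committed.
		 */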
8143 		status = dc_stream_get_status_from_state(old_dm_state->context,
8144 							 new_dm_crtc_state->stream);
8145 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8146 		/*
8147 		 * TODO: DC modifies the surface during this call so we need
8148 		 * to lock here - find a way to do this without locking.
8149 		 */
8150 		mutex_lock(&dm->dc_lock);
8151 		update_type = dc_check_update_surfaces_for_stream(
8152 				dc,	bundle->surface_updates, num_plane,
8153 				&bundle->stream_update, status);
8154 		mutex_unlock(&dm->dc_lock);
8155 
8156 		if (update_type > UPDATE_TYPE_MED) {
8157 			update_type = UPDATE_TYPE_FULL;
8158 			goto cleanup;
8159 		}
8160 	}
8161 
8162 cleanup:
8163 	kfree(bundle);
8164 
8165 	*out_type = update_type;
8166 	return ret;
8167 }
8168 
8169 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8170 {
8171 	struct drm_connector *connector;
8172 	struct drm_connector_state *conn_state;
8173 	struct amdgpu_dm_connector *aconnector = NULL;
8174 	int i;
8175 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8176 		if (conn_state->crtc != crtc)
8177 			continue;
8178 
8179 		aconnector = to_amdgpu_dm_connector(connector);
8180 		if (!aconnector->port || !aconnector->mst_port)
8181 			aconnector = NULL;
8182 		else
8183 			break;
8184 	}
8185 
8186 	if (!aconnector)
8187 		return 0;
8188 
8189 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8190 }
8191 
8192 /**
8193  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8194  * @dev: The DRM device
8195  * @state: The atomic state to commit
8196  *
8197  * Validate that the given atomic state is programmable by DC into hardware.
8198  * This involves constructing a &struct dc_state reflecting the new hardware
8199  * state we wish to commit, then querying DC to see if it is programmable. It's
8200  * important not to modify the existing DC state. Otherwise, atomic_check
8201  * may unexpectedly commit hardware changes.
8202  *
8203  * When validating the DC state, it's important that the right locks are
8204  * acquired. For the full-update case, which removes/adds/updates streams on
8205  * one CRTC while flipping on another CRTC, acquiring the global lock will
8206  * guarantee that any such full update commit waits for completion of any
8207  * outstanding flip using DRM's synchronization events. See
8208  * dm_determine_update_type_for_commit()
8209  *
8210  * Note that DM adds the affected connectors for all CRTCs in state, even
8211  * when that might not seem necessary. This is because DC stream creation requires the
8212  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8213  * be possible but non-trivial - a possible TODO item.
8214  *
8215  * Return: 0 on success, or a negative error code if validation failed.
8216  */
8217 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8218 				  struct drm_atomic_state *state)
8219 {
8220 	struct amdgpu_device *adev = dev->dev_private;
8221 	struct dm_atomic_state *dm_state = NULL;
8222 	struct dc *dc = adev->dm.dc;
8223 	struct drm_connector *connector;
8224 	struct drm_connector_state *old_con_state, *new_con_state;
8225 	struct drm_crtc *crtc;
8226 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8227 	struct drm_plane *plane;
8228 	struct drm_plane_state *old_plane_state, *new_plane_state;
8229 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8230 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8231 
8232 	int ret, i;
8233 
8234 	/*
8235 	 * This bool will be set to true for any modeset/reset
8236 	 * or plane update which implies a non-fast surface update.
8237 	 */
8238 	bool lock_and_validation_needed = false;
8239 
8240 	ret = drm_atomic_helper_check_modeset(dev, state);
8241 	if (ret)
8242 		goto fail;
8243 
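	/*
	 * On Navi10 and newer, a modeset on one MST stream using DSC can
	 * affect the bandwidth available to its siblings, so pull every CRTC
	 * sharing the MST topology into the state.
	 */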
8244 	if (adev->asic_type >= CHIP_NAVI10) {
8245 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8246 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8247 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8248 				if (ret)
8249 					goto fail;
8250 			}
8251 		}
8252 	}
8253 
8254 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8255 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8256 		    !new_crtc_state->color_mgmt_changed &&
8257 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8258 			continue;
8259 
8260 		if (!new_crtc_state->enable)
8261 			continue;
8262 
8263 		ret = drm_atomic_add_affected_connectors(state, crtc);
8264 		if (ret)
8265 			return ret;
8266 
8267 		ret = drm_atomic_add_affected_planes(state, crtc);
8268 		if (ret)
8269 			goto fail;
8270 	}
8271 
8272 	/*
8273 	 * Add all primary and overlay planes on the CRTC to the state
8274 	 * whenever a plane is enabled to maintain correct z-ordering
8275 	 * and to enable fast surface updates.
8276 	 */
8277 	drm_for_each_crtc(crtc, dev) {
8278 		bool modified = false;
8279 
8280 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8281 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8282 				continue;
8283 
8284 			if (new_plane_state->crtc == crtc ||
8285 			    old_plane_state->crtc == crtc) {
8286 				modified = true;
8287 				break;
8288 			}
8289 		}
8290 
8291 		if (!modified)
8292 			continue;
8293 
8294 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8295 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8296 				continue;
8297 
8298 			new_plane_state =
8299 				drm_atomic_get_plane_state(state, plane);
8300 
8301 			if (IS_ERR(new_plane_state)) {
8302 				ret = PTR_ERR(new_plane_state);
8303 				goto fail;
8304 			}
8305 		}
8306 	}
8307 
8308 	/* Remove existing planes if they are modified */
8309 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8310 		ret = dm_update_plane_state(dc, state, plane,
8311 					    old_plane_state,
8312 					    new_plane_state,
8313 					    false,
8314 					    &lock_and_validation_needed);
8315 		if (ret)
8316 			goto fail;
8317 	}
8318 
8319 	/* Disable all crtcs which require disable */
8320 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8321 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8322 					   old_crtc_state,
8323 					   new_crtc_state,
8324 					   false,
8325 					   &lock_and_validation_needed);
8326 		if (ret)
8327 			goto fail;
8328 	}
8329 
8330 	/* Enable all crtcs which require enable */
8331 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8332 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8333 					   old_crtc_state,
8334 					   new_crtc_state,
8335 					   true,
8336 					   &lock_and_validation_needed);
8337 		if (ret)
8338 			goto fail;
8339 	}
8340 
8341 	/* Add new/modified planes */
8342 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8343 		ret = dm_update_plane_state(dc, state, plane,
8344 					    old_plane_state,
8345 					    new_plane_state,
8346 					    true,
8347 					    &lock_and_validation_needed);
8348 		if (ret)
8349 			goto fail;
8350 	}
8351 
8352 	/* Run this here since we want to validate the streams we created */
8353 	ret = drm_atomic_helper_check_planes(dev, state);
8354 	if (ret)
8355 		goto fail;
8356 
8357 	if (state->legacy_cursor_update) {
8358 		/*
8359 		 * This is a fast cursor update coming from the plane update
8360 		 * helper, check if it can be done asynchronously for better
8361 		 * performance.
8362 		 */
8363 		state->async_update =
8364 			!drm_atomic_helper_async_check(dev, state);
8365 
8366 		/*
8367 		 * Skip the remaining global validation if this is an async
8368 		 * update. Cursor updates can be done without affecting
8369 		 * state or bandwidth calcs and this avoids the performance
8370 		 * penalty of locking the private state object and
8371 		 * allocating a new dc_state.
8372 		 */
8373 		if (state->async_update)
8374 			return 0;
8375 	}
8376 
8377 	/* Check scaling and underscan changes */
8378 	/* TODO: Removed scaling changes validation due to inability to commit
8379 	 * a new stream into context without causing a full reset. Need to
8380 	 * decide how to handle.
8381 	 */
8382 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8383 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8384 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8385 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8386 
8387 		/* Skip any modesets/resets */
8388 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8389 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8390 			continue;
8391 
8392 		/* Skip anything that is not a scaling or underscan change */
8393 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8394 			continue;
8395 
8396 		overall_update_type = UPDATE_TYPE_FULL;
8397 		lock_and_validation_needed = true;
8398 	}
8399 
8400 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8401 	if (ret)
8402 		goto fail;
8403 
8404 	if (overall_update_type < update_type)
8405 		overall_update_type = update_type;
8406 
8407 	/*
8408 	 * lock_and_validation_needed was an old way to determine if we need to set
8409 	 * the global lock. Leaving it in to check if we broke any corner cases:
8410 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8411 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8412 	 */
8413 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8414 		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8415 
8416 	if (overall_update_type > UPDATE_TYPE_FAST) {
8417 		ret = dm_atomic_get_state(state, &dm_state);
8418 		if (ret)
8419 			goto fail;
8420 
8421 		ret = do_acquire_global_lock(dev, state);
8422 		if (ret)
8423 			goto fail;
8424 
8425 #if defined(CONFIG_DRM_AMD_DC_DCN)
8426 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
8427 			ret = -EINVAL;
			goto fail;
		}
8428 
8429 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8430 		if (ret)
8431 			goto fail;
8432 #endif
8433 
8434 		/*
8435 		 * Perform validation of MST topology in the state:
8436 		 * We need to perform MST atomic check before calling
8437 		 * dc_validate_global_state(), or there is a chance
8438 		 * to get stuck in an infinite loop and hang eventually.
8439 		 */
8440 		ret = drm_dp_mst_atomic_check(state);
8441 		if (ret)
8442 			goto fail;
8443 
8444 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8445 			ret = -EINVAL;
8446 			goto fail;
8447 		}
8448 	} else {
8449 		/*
8450 		 * The commit is a fast update. Fast updates shouldn't change
8451 		 * the DC context, affect global validation, and can have their
8452 		 * commit work done in parallel with other commits not touching
8453 		 * the same resource. If we have a new DC context as part of
8454 		 * the DM atomic state from validation we need to free it and
8455 		 * retain the existing one instead.
8456 		 */
8457 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8458 
8459 		new_dm_state = dm_atomic_get_new_state(state);
8460 		old_dm_state = dm_atomic_get_old_state(state);
8461 
8462 		if (new_dm_state && old_dm_state) {
8463 			if (new_dm_state->context)
8464 				dc_release_state(new_dm_state->context);
8465 
8466 			new_dm_state->context = old_dm_state->context;
8467 
8468 			if (old_dm_state->context)
8469 				dc_retain_state(old_dm_state->context);
8470 		}
8471 	}
8472 
8473 	/* Store the overall update type for use later in atomic check. */
8474 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8475 		struct dm_crtc_state *dm_new_crtc_state =
8476 			to_dm_crtc_state(new_crtc_state);
8477 
8478 		dm_new_crtc_state->update_type = (int)overall_update_type;
8479 	}
8480 
8481 	/* Must be successful */
8482 	WARN_ON(ret);
8483 	return ret;
8484 
8485 fail:
8486 	if (ret == -EDEADLK)
8487 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8488 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8489 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8490 	else
8491 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8492 
8493 	return ret;
8494 }
8495 
8496 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8497 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8498 {
8499 	uint8_t dpcd_data;
8500 	bool capable = false;
8501 
8502 	if (amdgpu_dm_connector->dc_link &&
8503 		dm_helpers_dp_read_dpcd(
8504 				NULL,
8505 				amdgpu_dm_connector->dc_link,
8506 				DP_DOWN_STREAM_PORT_COUNT,
8507 				&dpcd_data,
8508 				sizeof(dpcd_data))) {
8509 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8510 	}
8511 
8512 	return capable;
8513 }
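
/*
 * Parse the EDID's monitor range descriptor (when the EDID check is required)
 * and update the connector's FreeSync capability and supported refresh range.
 */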
8514 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8515 					struct edid *edid)
8516 {
8517 	int i;
8518 	bool edid_check_required;
8519 	struct detailed_timing *timing;
8520 	struct detailed_non_pixel *data;
8521 	struct detailed_data_monitor_range *range;
8522 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8523 			to_amdgpu_dm_connector(connector);
8524 	struct dm_connector_state *dm_con_state = NULL;
8525 
8526 	struct drm_device *dev = connector->dev;
8527 	struct amdgpu_device *adev = dev->dev_private;
8528 	bool freesync_capable = false;
8529 
8530 	if (!connector->state) {
8531 		DRM_ERROR("%s - Connector has no state\n", __func__);
8532 		goto update;
8533 	}
8534 
8535 	if (!edid) {
8536 		dm_con_state = to_dm_connector_state(connector->state);
8537 
8538 		amdgpu_dm_connector->min_vfreq = 0;
8539 		amdgpu_dm_connector->max_vfreq = 0;
8540 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8541 
8542 		goto update;
8543 	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * The EDID is non-NULL at this point (checked above), so restrict
	 * FreeSync support to DP and eDP sinks.
	 */
	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
		edid_check_required = is_dp_capable_without_timing_msa(
					adev->dm.dc,
					amdgpu_dm_connector);
	}
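	/* Only look for monitor range limits descriptors on EDID 1.2 or newer. */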
	if (edid_check_required && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check whether the monitor advertises continuous
			 * frequency mode via a range limits descriptor.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Accept range limits only (flags == 1), meaning no
			 * additional timing information is provided. Default
			 * GTF, GTF secondary curve and CVT are not supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
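			/* The EDID stores the maximum pixel clock in 10 MHz units. */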
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
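	/*
	 * Read the sink's PSR capability block; the first byte holds the
	 * supported PSR version, so any non-zero value means the eDP sink
	 * supports PSR.
	 */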
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

/**
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
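		/*
		 * Static link parameters handed to the PSR firmware via
		 * dc_link_setup_psr().
		 */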
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/**
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating the
	 * interrupt to enter PSR; start from a fail-safe default of two
	 * static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

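	/*
	 * Nominal refresh rate in Hz: the pixel clock (pix_clk_100hz is in
	 * 100 Hz units, hence the * 100) divided by the total number of
	 * pixels per frame (h_total * v_total).
	 */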
	vsync_rate_hz = div64_u64(div64_u64(stream->timing.pix_clk_100hz * 100,
					    stream->timing.v_total),
				  stream->timing.h_total);

	/*
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed, rounding up.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

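	/*
	 * Treat cursor, overlay and surface updates as screen activity when
	 * counting static frames.
	 */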
	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/**
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}