1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #endif
101 
102 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
103 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
104 
105 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
107 
108 /* Number of bytes in PSP header for firmware. */
109 #define PSP_HEADER_BYTES 0x100
110 
111 /* Number of bytes in PSP footer for firmware. */
112 #define PSP_FOOTER_BYTES 0x100
113 
114 /**
115  * DOC: overview
116  *
117  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
118  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
119  * requests into DC requests, and DC responses into DRM responses.
120  *
121  * The root control structure is &struct amdgpu_display_manager.
122  */
123 
124 /* basic init/fini API */
125 static int amdgpu_dm_init(struct amdgpu_device *adev);
126 static void amdgpu_dm_fini(struct amdgpu_device *adev);
127 
128 /*
129  * initializes drm_device display related structures, based on the information
130  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
131  * drm_encoder, drm_mode_config
132  *
133  * Returns 0 on success
134  */
135 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
136 /* removes and deallocates the drm structures, created by the above function */
137 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
138 
139 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
140 				struct drm_plane *plane,
141 				unsigned long possible_crtcs,
142 				const struct dc_plane_cap *plane_cap);
143 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
144 			       struct drm_plane *plane,
145 			       uint32_t link_index);
146 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
147 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
148 				    uint32_t link_index,
149 				    struct amdgpu_encoder *amdgpu_encoder);
150 static int amdgpu_dm_encoder_init(struct drm_device *dev,
151 				  struct amdgpu_encoder *aencoder,
152 				  uint32_t link_index);
153 
154 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
155 
156 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
157 				   struct drm_atomic_state *state,
158 				   bool nonblock);
159 
160 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
161 
162 static int amdgpu_dm_atomic_check(struct drm_device *dev,
163 				  struct drm_atomic_state *state);
164 
165 static void handle_cursor_update(struct drm_plane *plane,
166 				 struct drm_plane_state *old_plane_state);
167 
168 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
169 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
170 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
171 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
172 
173 
174 /*
175  * dm_vblank_get_counter
176  *
177  * @brief
178  * Get counter for number of vertical blanks
179  *
180  * @param
181  * struct amdgpu_device *adev - [in] desired amdgpu device
182  * int disp_idx - [in] which CRTC to get the counter from
183  *
184  * @return
185  * Counter for vertical blanks
186  */
187 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
188 {
189 	if (crtc >= adev->mode_info.num_crtc)
190 		return 0;
191 	else {
192 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
193 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
194 				acrtc->base.state);
195 
196 
197 		if (acrtc_state->stream == NULL) {
198 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
199 				  crtc);
200 			return 0;
201 		}
202 
203 		return dc_stream_get_vblank_counter(acrtc_state->stream);
204 	}
205 }
206 
207 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
208 				  u32 *vbl, u32 *position)
209 {
210 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
211 
212 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
213 		return -EINVAL;
214 	else {
215 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
216 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
217 						acrtc->base.state);
218 
219 		if (acrtc_state->stream ==  NULL) {
220 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
221 				  crtc);
222 			return 0;
223 		}
224 
225 		/*
226 		 * TODO rework base driver to use values directly.
227 		 * for now parse it back into reg-format
228 		 */
229 		dc_stream_get_scanoutpos(acrtc_state->stream,
230 					 &v_blank_start,
231 					 &v_blank_end,
232 					 &h_position,
233 					 &v_position);
234 
235 		*position = v_position | (h_position << 16);
236 		*vbl = v_blank_start | (v_blank_end << 16);
237 	}
238 
239 	return 0;
240 }
241 
242 static bool dm_is_idle(void *handle)
243 {
244 	/* XXX todo */
245 	return true;
246 }
247 
248 static int dm_wait_for_idle(void *handle)
249 {
250 	/* XXX todo */
251 	return 0;
252 }
253 
254 static bool dm_check_soft_reset(void *handle)
255 {
256 	return false;
257 }
258 
259 static int dm_soft_reset(void *handle)
260 {
261 	/* XXX todo */
262 	return 0;
263 }
264 
265 static struct amdgpu_crtc *
266 get_crtc_by_otg_inst(struct amdgpu_device *adev,
267 		     int otg_inst)
268 {
269 	struct drm_device *dev = adev->ddev;
270 	struct drm_crtc *crtc;
271 	struct amdgpu_crtc *amdgpu_crtc;
272 
273 	if (otg_inst == -1) {
274 		WARN_ON(1);
275 		return adev->mode_info.crtcs[0];
276 	}
277 
278 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
279 		amdgpu_crtc = to_amdgpu_crtc(crtc);
280 
281 		if (amdgpu_crtc->otg_inst == otg_inst)
282 			return amdgpu_crtc;
283 	}
284 
285 	return NULL;
286 }
287 
288 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
289 {
290 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
291 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
292 }
293 
294 /**
295  * dm_pflip_high_irq() - Handle pageflip interrupt
296  * @interrupt_params: ignored
297  *
298  * Handles the pageflip interrupt by notifying all interested parties
299  * that the pageflip has been completed.
300  */
301 static void dm_pflip_high_irq(void *interrupt_params)
302 {
303 	struct amdgpu_crtc *amdgpu_crtc;
304 	struct common_irq_params *irq_params = interrupt_params;
305 	struct amdgpu_device *adev = irq_params->adev;
306 	unsigned long flags;
307 	struct drm_pending_vblank_event *e;
308 	struct dm_crtc_state *acrtc_state;
309 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
310 	bool vrr_active;
311 
312 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
313 
314 	/* IRQ could occur when in initial stage */
315 	/* TODO work and BO cleanup */
316 	if (amdgpu_crtc == NULL) {
317 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
318 		return;
319 	}
320 
321 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
322 
323 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
324 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
325 						 amdgpu_crtc->pflip_status,
326 						 AMDGPU_FLIP_SUBMITTED,
327 						 amdgpu_crtc->crtc_id,
328 						 amdgpu_crtc);
329 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
330 		return;
331 	}
332 
333 	/* page flip completed. */
334 	e = amdgpu_crtc->event;
335 	amdgpu_crtc->event = NULL;
336 
337 	if (!e)
338 		WARN_ON(1);
339 
340 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
341 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
342 
343 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
344 	if (!vrr_active ||
345 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
346 				      &v_blank_end, &hpos, &vpos) ||
347 	    (vpos < v_blank_start)) {
348 		/* Update to correct count and vblank timestamp if racing with
349 		 * vblank irq. This also updates to the correct vblank timestamp
350 		 * even in VRR mode, as scanout is past the front-porch atm.
351 		 */
352 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
353 
354 		/* Wake up userspace by sending the pageflip event with proper
355 		 * count and timestamp of vblank of flip completion.
356 		 */
357 		if (e) {
358 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
359 
360 			/* Event sent, so done with vblank for this flip */
361 			drm_crtc_vblank_put(&amdgpu_crtc->base);
362 		}
363 	} else if (e) {
364 		/* VRR active and inside front-porch: vblank count and
365 		 * timestamp for pageflip event will only be up to date after
366 		 * drm_crtc_handle_vblank() has been executed from late vblank
367 		 * irq handler after start of back-porch (vline 0). We queue the
368 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
369 		 * updated timestamp and count, once it runs after us.
370 		 *
371 		 * We need to open-code this instead of using the helper
372 		 * drm_crtc_arm_vblank_event(), as that helper would
373 		 * call drm_crtc_accurate_vblank_count(), which we must
374 		 * not call in VRR mode while we are in front-porch!
375 		 */
376 
377 		/* sequence will be replaced by real count during send-out. */
378 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
379 		e->pipe = amdgpu_crtc->crtc_id;
380 
381 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
382 		e = NULL;
383 	}
384 
385 	/* Keep track of vblank of this flip for flip throttling. We use the
386 	 * cooked hw counter, as that one incremented at start of this vblank
387 	 * of pageflip completion, so last_flip_vblank is the forbidden count
388 	 * for queueing new pageflips if vsync + VRR is enabled.
389 	 */
390 	amdgpu_crtc->last_flip_vblank =
391 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
392 
393 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
394 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
395 
396 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
397 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
398 			 vrr_active, (int) !e);
399 }
400 
401 static void dm_vupdate_high_irq(void *interrupt_params)
402 {
403 	struct common_irq_params *irq_params = interrupt_params;
404 	struct amdgpu_device *adev = irq_params->adev;
405 	struct amdgpu_crtc *acrtc;
406 	struct dm_crtc_state *acrtc_state;
407 	unsigned long flags;
408 
409 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
410 
411 	if (acrtc) {
412 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
413 
414 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
415 			      acrtc->crtc_id,
416 			      amdgpu_dm_vrr_active(acrtc_state));
417 
418 		/* Core vblank handling is done here after end of front-porch in
419 		 * vrr mode, as vblank timestamping will give valid results
420 		 * while now done after front-porch. This will also deliver
421 		 * page-flip completion events that have been queued to us
422 		 * if a pageflip happened inside front-porch.
423 		 */
424 		if (amdgpu_dm_vrr_active(acrtc_state)) {
425 			drm_crtc_handle_vblank(&acrtc->base);
426 
427 			/* BTR processing for pre-DCE12 ASICs */
428 			if (acrtc_state->stream &&
429 			    adev->family < AMDGPU_FAMILY_AI) {
430 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
431 				mod_freesync_handle_v_update(
432 				    adev->dm.freesync_module,
433 				    acrtc_state->stream,
434 				    &acrtc_state->vrr_params);
435 
436 				dc_stream_adjust_vmin_vmax(
437 				    adev->dm.dc,
438 				    acrtc_state->stream,
439 				    &acrtc_state->vrr_params.adjust);
440 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
441 			}
442 		}
443 	}
444 }
445 
446 /**
447  * dm_crtc_high_irq() - Handles CRTC interrupt
448  * @interrupt_params: used for determining the CRTC instance
449  *
450  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
451  * event handler.
452  */
453 static void dm_crtc_high_irq(void *interrupt_params)
454 {
455 	struct common_irq_params *irq_params = interrupt_params;
456 	struct amdgpu_device *adev = irq_params->adev;
457 	struct amdgpu_crtc *acrtc;
458 	struct dm_crtc_state *acrtc_state;
459 	unsigned long flags;
460 
461 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
462 	if (!acrtc)
463 		return;
464 
465 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
466 
467 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
468 			 amdgpu_dm_vrr_active(acrtc_state),
469 			 acrtc_state->active_planes);
470 
471 	/**
472 	 * Core vblank handling at start of front-porch is only possible
473 	 * in non-vrr mode, as only there vblank timestamping will give
474 	 * valid results while done in front-porch. Otherwise defer it
475 	 * to dm_vupdate_high_irq after end of front-porch.
476 	 */
477 	if (!amdgpu_dm_vrr_active(acrtc_state))
478 		drm_crtc_handle_vblank(&acrtc->base);
479 
480 	/**
481 	 * Following stuff must happen at start of vblank, for crc
482 	 * computation and below-the-range btr support in vrr mode.
483 	 */
484 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
485 
486 	/* BTR updates need to happen before VUPDATE on Vega and above. */
487 	if (adev->family < AMDGPU_FAMILY_AI)
488 		return;
489 
490 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
491 
492 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
493 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
494 		mod_freesync_handle_v_update(adev->dm.freesync_module,
495 					     acrtc_state->stream,
496 					     &acrtc_state->vrr_params);
497 
498 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
499 					   &acrtc_state->vrr_params.adjust);
500 	}
501 
502 	/*
503 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
504 	 * In that case, pageflip completion interrupts won't fire and pageflip
505 	 * completion events won't get delivered. Prevent this by sending
506 	 * pending pageflip events from here if a flip is still pending.
507 	 *
508 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
509 	 * avoid race conditions between flip programming and completion,
510 	 * which could cause too early flip completion events.
511 	 */
512 	if (adev->family >= AMDGPU_FAMILY_RV &&
513 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
514 	    acrtc_state->active_planes == 0) {
515 		if (acrtc->event) {
516 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
517 			acrtc->event = NULL;
518 			drm_crtc_vblank_put(&acrtc->base);
519 		}
520 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
521 	}
522 
523 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
524 }
525 
526 static int dm_set_clockgating_state(void *handle,
527 		  enum amd_clockgating_state state)
528 {
529 	return 0;
530 }
531 
532 static int dm_set_powergating_state(void *handle,
533 		  enum amd_powergating_state state)
534 {
535 	return 0;
536 }
537 
538 /* Prototypes of private functions */
539 static int dm_early_init(void* handle);
540 
541 /* Allocate memory for FBC compressed data  */
542 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
543 {
544 	struct drm_device *dev = connector->dev;
545 	struct amdgpu_device *adev = dev->dev_private;
546 	struct dm_comressor_info *compressor = &adev->dm.compressor;
547 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
548 	struct drm_display_mode *mode;
549 	unsigned long max_size = 0;
550 
551 	if (adev->dm.dc->fbc_compressor == NULL)
552 		return;
553 
554 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
555 		return;
556 
557 	if (compressor->bo_ptr)
558 		return;
559 
560 
561 	list_for_each_entry(mode, &connector->modes, head) {
562 		if (max_size < mode->htotal * mode->vtotal)
563 			max_size = mode->htotal * mode->vtotal;
564 	}
565 
566 	if (max_size) {
567 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
568 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
569 			    &compressor->gpu_addr, &compressor->cpu_addr);
570 
571 		if (r)
572 			DRM_ERROR("DM: Failed to initialize FBC\n");
573 		else {
574 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
575 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
576 		}
577 
578 	}
579 
580 }
581 
582 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
583 					  int pipe, bool *enabled,
584 					  unsigned char *buf, int max_bytes)
585 {
586 	struct drm_device *dev = dev_get_drvdata(kdev);
587 	struct amdgpu_device *adev = dev->dev_private;
588 	struct drm_connector *connector;
589 	struct drm_connector_list_iter conn_iter;
590 	struct amdgpu_dm_connector *aconnector;
591 	int ret = 0;
592 
593 	*enabled = false;
594 
595 	mutex_lock(&adev->dm.audio_lock);
596 
597 	drm_connector_list_iter_begin(dev, &conn_iter);
598 	drm_for_each_connector_iter(connector, &conn_iter) {
599 		aconnector = to_amdgpu_dm_connector(connector);
600 		if (aconnector->audio_inst != port)
601 			continue;
602 
603 		*enabled = true;
604 		ret = drm_eld_size(connector->eld);
605 		memcpy(buf, connector->eld, min(max_bytes, ret));
606 
607 		break;
608 	}
609 	drm_connector_list_iter_end(&conn_iter);
610 
611 	mutex_unlock(&adev->dm.audio_lock);
612 
613 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
614 
615 	return ret;
616 }
617 
618 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
619 	.get_eld = amdgpu_dm_audio_component_get_eld,
620 };
621 
622 static int amdgpu_dm_audio_component_bind(struct device *kdev,
623 				       struct device *hda_kdev, void *data)
624 {
625 	struct drm_device *dev = dev_get_drvdata(kdev);
626 	struct amdgpu_device *adev = dev->dev_private;
627 	struct drm_audio_component *acomp = data;
628 
629 	acomp->ops = &amdgpu_dm_audio_component_ops;
630 	acomp->dev = kdev;
631 	adev->dm.audio_component = acomp;
632 
633 	return 0;
634 }
635 
636 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
637 					  struct device *hda_kdev, void *data)
638 {
639 	struct drm_device *dev = dev_get_drvdata(kdev);
640 	struct amdgpu_device *adev = dev->dev_private;
641 	struct drm_audio_component *acomp = data;
642 
643 	acomp->ops = NULL;
644 	acomp->dev = NULL;
645 	adev->dm.audio_component = NULL;
646 }
647 
648 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
649 	.bind	= amdgpu_dm_audio_component_bind,
650 	.unbind	= amdgpu_dm_audio_component_unbind,
651 };
652 
653 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
654 {
655 	int i, ret;
656 
657 	if (!amdgpu_audio)
658 		return 0;
659 
660 	adev->mode_info.audio.enabled = true;
661 
662 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
663 
664 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
665 		adev->mode_info.audio.pin[i].channels = -1;
666 		adev->mode_info.audio.pin[i].rate = -1;
667 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
668 		adev->mode_info.audio.pin[i].status_bits = 0;
669 		adev->mode_info.audio.pin[i].category_code = 0;
670 		adev->mode_info.audio.pin[i].connected = false;
671 		adev->mode_info.audio.pin[i].id =
672 			adev->dm.dc->res_pool->audios[i]->inst;
673 		adev->mode_info.audio.pin[i].offset = 0;
674 	}
675 
676 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
677 	if (ret < 0)
678 		return ret;
679 
680 	adev->dm.audio_registered = true;
681 
682 	return 0;
683 }
684 
685 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
686 {
687 	if (!amdgpu_audio)
688 		return;
689 
690 	if (!adev->mode_info.audio.enabled)
691 		return;
692 
693 	if (adev->dm.audio_registered) {
694 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
695 		adev->dm.audio_registered = false;
696 	}
697 
698 	/* TODO: Disable audio? */
699 
700 	adev->mode_info.audio.enabled = false;
701 }
702 
703 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
704 {
705 	struct drm_audio_component *acomp = adev->dm.audio_component;
706 
707 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
708 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
709 
710 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
711 						 pin, -1);
712 	}
713 }
714 
715 static int dm_dmub_hw_init(struct amdgpu_device *adev)
716 {
717 	const struct dmcub_firmware_header_v1_0 *hdr;
718 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
719 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
720 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
721 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
722 	struct abm *abm = adev->dm.dc->res_pool->abm;
723 	struct dmub_srv_hw_params hw_params;
724 	enum dmub_status status;
725 	const unsigned char *fw_inst_const, *fw_bss_data;
726 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
727 	bool has_hw_support;
728 
729 	if (!dmub_srv)
730 		/* DMUB isn't supported on the ASIC. */
731 		return 0;
732 
733 	if (!fb_info) {
734 		DRM_ERROR("No framebuffer info for DMUB service.\n");
735 		return -EINVAL;
736 	}
737 
738 	if (!dmub_fw) {
739 		/* Firmware required for DMUB support. */
740 		DRM_ERROR("No firmware provided for DMUB.\n");
741 		return -EINVAL;
742 	}
743 
744 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
745 	if (status != DMUB_STATUS_OK) {
746 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
747 		return -EINVAL;
748 	}
749 
750 	if (!has_hw_support) {
751 		DRM_INFO("DMUB unsupported on ASIC\n");
752 		return 0;
753 	}
754 
755 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
756 
757 	fw_inst_const = dmub_fw->data +
758 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
759 			PSP_HEADER_BYTES;
760 
761 	fw_bss_data = dmub_fw->data +
762 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
763 		      le32_to_cpu(hdr->inst_const_bytes);
764 
765 	/* Copy firmware and bios info into FB memory. */
766 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
767 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
768 
769 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
770 
771 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
772 	 * amdgpu_ucode_init_single_fw will load dmub firmware
773 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
774 	 * will be done by dm_dmub_hw_init
775 	 */
776 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
777 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
778 				fw_inst_const_size);
779 	}
780 
781 	if (fw_bss_data_size)
782 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
783 		       fw_bss_data, fw_bss_data_size);
784 
785 	/* Copy firmware bios info into FB memory. */
786 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
787 	       adev->bios_size);
788 
789 	/* Reset regions that need to be reset. */
790 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
791 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
792 
793 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
794 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
795 
796 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
797 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
798 
799 	/* Initialize hardware. */
800 	memset(&hw_params, 0, sizeof(hw_params));
801 	hw_params.fb_base = adev->gmc.fb_start;
802 	hw_params.fb_offset = adev->gmc.aper_base;
803 
804 	/* backdoor load firmware and trigger dmub running */
805 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
806 		hw_params.load_inst_const = true;
807 
808 	if (dmcu)
809 		hw_params.psp_version = dmcu->psp_version;
810 
811 	for (i = 0; i < fb_info->num_fb; ++i)
812 		hw_params.fb[i] = &fb_info->fb[i];
813 
814 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
815 	if (status != DMUB_STATUS_OK) {
816 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
817 		return -EINVAL;
818 	}
819 
820 	/* Wait for firmware load to finish. */
821 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
822 	if (status != DMUB_STATUS_OK)
823 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
824 
825 	/* Init DMCU and ABM if available. */
826 	if (dmcu && abm) {
827 		dmcu->funcs->dmcu_init(dmcu);
828 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
829 	}
830 
831 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
832 	if (!adev->dm.dc->ctx->dmub_srv) {
833 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
834 		return -ENOMEM;
835 	}
836 
837 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
838 		 adev->dm.dmcub_fw_version);
839 
840 	return 0;
841 }
842 
843 static int amdgpu_dm_init(struct amdgpu_device *adev)
844 {
845 	struct dc_init_data init_data;
846 #ifdef CONFIG_DRM_AMD_DC_HDCP
847 	struct dc_callback_init init_params;
848 #endif
849 	int r;
850 
851 	adev->dm.ddev = adev->ddev;
852 	adev->dm.adev = adev;
853 
854 	/* Zero all the fields */
855 	memset(&init_data, 0, sizeof(init_data));
856 #ifdef CONFIG_DRM_AMD_DC_HDCP
857 	memset(&init_params, 0, sizeof(init_params));
858 #endif
859 
860 	mutex_init(&adev->dm.dc_lock);
861 	mutex_init(&adev->dm.audio_lock);
862 
863 	if(amdgpu_dm_irq_init(adev)) {
864 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
865 		goto error;
866 	}
867 
868 	init_data.asic_id.chip_family = adev->family;
869 
870 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
871 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
872 
873 	init_data.asic_id.vram_width = adev->gmc.vram_width;
874 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
875 	init_data.asic_id.atombios_base_address =
876 		adev->mode_info.atom_context->bios;
877 
878 	init_data.driver = adev;
879 
880 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
881 
882 	if (!adev->dm.cgs_device) {
883 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
884 		goto error;
885 	}
886 
887 	init_data.cgs_device = adev->dm.cgs_device;
888 
889 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
890 
891 	switch (adev->asic_type) {
892 	case CHIP_CARRIZO:
893 	case CHIP_STONEY:
894 	case CHIP_RAVEN:
895 	case CHIP_RENOIR:
896 		init_data.flags.gpu_vm_support = true;
897 		break;
898 	default:
899 		break;
900 	}
901 
902 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
903 		init_data.flags.fbc_support = true;
904 
905 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
906 		init_data.flags.multi_mon_pp_mclk_switch = true;
907 
908 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
909 		init_data.flags.disable_fractional_pwm = true;
910 
911 	init_data.flags.power_down_display_on_boot = true;
912 
913 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
914 
915 	/* Display Core create. */
916 	adev->dm.dc = dc_create(&init_data);
917 
918 	if (adev->dm.dc) {
919 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
920 	} else {
921 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
922 		goto error;
923 	}
924 
925 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
926 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
927 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
928 	}
929 
930 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
931 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
932 
933 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
934 		adev->dm.dc->debug.disable_stutter = true;
935 
936 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
937 		adev->dm.dc->debug.disable_dsc = true;
938 
939 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
940 		adev->dm.dc->debug.disable_clock_gate = true;
941 
942 	r = dm_dmub_hw_init(adev);
943 	if (r) {
944 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
945 		goto error;
946 	}
947 
948 	dc_hardware_init(adev->dm.dc);
949 
950 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
951 	if (!adev->dm.freesync_module) {
952 		DRM_ERROR(
953 		"amdgpu: failed to initialize freesync_module.\n");
954 	} else
955 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
956 				adev->dm.freesync_module);
957 
958 	amdgpu_dm_init_color_mod();
959 
960 #ifdef CONFIG_DRM_AMD_DC_HDCP
961 	if (adev->asic_type >= CHIP_RAVEN) {
962 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
963 
964 		if (!adev->dm.hdcp_workqueue)
965 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
966 		else
967 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
968 
969 		dc_init_callbacks(adev->dm.dc, &init_params);
970 	}
971 #endif
972 	if (amdgpu_dm_initialize_drm_device(adev)) {
973 		DRM_ERROR(
974 		"amdgpu: failed to initialize sw for display support.\n");
975 		goto error;
976 	}
977 
978 	/* Update the actual used number of crtc */
979 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
980 
981 	/* create fake encoders for MST */
982 	dm_dp_create_fake_mst_encoders(adev);
983 
984 	/* TODO: Add_display_info? */
985 
986 	/* TODO use dynamic cursor width */
987 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
988 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
989 
990 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
991 		DRM_ERROR(
992 		"amdgpu: failed to initialize sw for display support.\n");
993 		goto error;
994 	}
995 
996 	DRM_DEBUG_DRIVER("KMS initialized.\n");
997 
998 	return 0;
999 error:
1000 	amdgpu_dm_fini(adev);
1001 
1002 	return -EINVAL;
1003 }
1004 
1005 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1006 {
1007 	int i;
1008 
1009 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1010 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1011 	}
1012 
1013 	amdgpu_dm_audio_fini(adev);
1014 
1015 	amdgpu_dm_destroy_drm_device(&adev->dm);
1016 
1017 #ifdef CONFIG_DRM_AMD_DC_HDCP
1018 	if (adev->dm.hdcp_workqueue) {
1019 		hdcp_destroy(adev->dm.hdcp_workqueue);
1020 		adev->dm.hdcp_workqueue = NULL;
1021 	}
1022 
1023 	if (adev->dm.dc)
1024 		dc_deinit_callbacks(adev->dm.dc);
1025 #endif
1026 	if (adev->dm.dc->ctx->dmub_srv) {
1027 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1028 		adev->dm.dc->ctx->dmub_srv = NULL;
1029 	}
1030 
1031 	if (adev->dm.dmub_bo)
1032 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1033 				      &adev->dm.dmub_bo_gpu_addr,
1034 				      &adev->dm.dmub_bo_cpu_addr);
1035 
1036 	/* DC Destroy TODO: Replace destroy DAL */
1037 	if (adev->dm.dc)
1038 		dc_destroy(&adev->dm.dc);
1039 	/*
1040 	 * TODO: pageflip, vlank interrupt
1041 	 *
1042 	 * amdgpu_dm_irq_fini(adev);
1043 	 */
1044 
1045 	if (adev->dm.cgs_device) {
1046 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1047 		adev->dm.cgs_device = NULL;
1048 	}
1049 	if (adev->dm.freesync_module) {
1050 		mod_freesync_destroy(adev->dm.freesync_module);
1051 		adev->dm.freesync_module = NULL;
1052 	}
1053 
1054 	mutex_destroy(&adev->dm.audio_lock);
1055 	mutex_destroy(&adev->dm.dc_lock);
1056 
1057 	return;
1058 }
1059 
1060 static int load_dmcu_fw(struct amdgpu_device *adev)
1061 {
1062 	const char *fw_name_dmcu = NULL;
1063 	int r;
1064 	const struct dmcu_firmware_header_v1_0 *hdr;
1065 
1066 	switch(adev->asic_type) {
1067 	case CHIP_BONAIRE:
1068 	case CHIP_HAWAII:
1069 	case CHIP_KAVERI:
1070 	case CHIP_KABINI:
1071 	case CHIP_MULLINS:
1072 	case CHIP_TONGA:
1073 	case CHIP_FIJI:
1074 	case CHIP_CARRIZO:
1075 	case CHIP_STONEY:
1076 	case CHIP_POLARIS11:
1077 	case CHIP_POLARIS10:
1078 	case CHIP_POLARIS12:
1079 	case CHIP_VEGAM:
1080 	case CHIP_VEGA10:
1081 	case CHIP_VEGA12:
1082 	case CHIP_VEGA20:
1083 	case CHIP_NAVI10:
1084 	case CHIP_NAVI14:
1085 	case CHIP_RENOIR:
1086 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1087 	case CHIP_SIENNA_CICHLID:
1088 	case CHIP_NAVY_FLOUNDER:
1089 #endif
1090 		return 0;
1091 	case CHIP_NAVI12:
1092 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1093 		break;
1094 	case CHIP_RAVEN:
1095 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1096 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1097 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1098 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1099 		else
1100 			return 0;
1101 		break;
1102 	default:
1103 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1104 		return -EINVAL;
1105 	}
1106 
1107 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1108 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1109 		return 0;
1110 	}
1111 
1112 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1113 	if (r == -ENOENT) {
1114 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1115 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1116 		adev->dm.fw_dmcu = NULL;
1117 		return 0;
1118 	}
1119 	if (r) {
1120 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1121 			fw_name_dmcu);
1122 		return r;
1123 	}
1124 
1125 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1126 	if (r) {
1127 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1128 			fw_name_dmcu);
1129 		release_firmware(adev->dm.fw_dmcu);
1130 		adev->dm.fw_dmcu = NULL;
1131 		return r;
1132 	}
1133 
1134 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1135 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1136 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1137 	adev->firmware.fw_size +=
1138 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1139 
1140 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1141 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1142 	adev->firmware.fw_size +=
1143 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1144 
1145 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1146 
1147 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1148 
1149 	return 0;
1150 }
1151 
1152 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1153 {
1154 	struct amdgpu_device *adev = ctx;
1155 
1156 	return dm_read_reg(adev->dm.dc->ctx, address);
1157 }
1158 
1159 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1160 				     uint32_t value)
1161 {
1162 	struct amdgpu_device *adev = ctx;
1163 
1164 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1165 }
1166 
1167 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1168 {
1169 	struct dmub_srv_create_params create_params;
1170 	struct dmub_srv_region_params region_params;
1171 	struct dmub_srv_region_info region_info;
1172 	struct dmub_srv_fb_params fb_params;
1173 	struct dmub_srv_fb_info *fb_info;
1174 	struct dmub_srv *dmub_srv;
1175 	const struct dmcub_firmware_header_v1_0 *hdr;
1176 	const char *fw_name_dmub;
1177 	enum dmub_asic dmub_asic;
1178 	enum dmub_status status;
1179 	int r;
1180 
1181 	switch (adev->asic_type) {
1182 	case CHIP_RENOIR:
1183 		dmub_asic = DMUB_ASIC_DCN21;
1184 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1185 		break;
1186 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1187 	case CHIP_SIENNA_CICHLID:
1188 	case CHIP_NAVY_FLOUNDER:
1189 		dmub_asic = DMUB_ASIC_DCN30;
1190 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1191 		break;
1192 #endif
1193 
1194 	default:
1195 		/* ASIC doesn't support DMUB. */
1196 		return 0;
1197 	}
1198 
1199 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1200 	if (r) {
1201 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1202 		return 0;
1203 	}
1204 
1205 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1206 	if (r) {
1207 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1208 		return 0;
1209 	}
1210 
1211 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1212 
1213 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1214 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1215 			AMDGPU_UCODE_ID_DMCUB;
1216 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1217 			adev->dm.dmub_fw;
1218 		adev->firmware.fw_size +=
1219 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1220 
1221 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1222 			 adev->dm.dmcub_fw_version);
1223 	}
1224 
1225 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1226 
1227 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1228 	dmub_srv = adev->dm.dmub_srv;
1229 
1230 	if (!dmub_srv) {
1231 		DRM_ERROR("Failed to allocate DMUB service!\n");
1232 		return -ENOMEM;
1233 	}
1234 
1235 	memset(&create_params, 0, sizeof(create_params));
1236 	create_params.user_ctx = adev;
1237 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1238 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1239 	create_params.asic = dmub_asic;
1240 
1241 	/* Create the DMUB service. */
1242 	status = dmub_srv_create(dmub_srv, &create_params);
1243 	if (status != DMUB_STATUS_OK) {
1244 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1245 		return -EINVAL;
1246 	}
1247 
1248 	/* Calculate the size of all the regions for the DMUB service. */
1249 	memset(&region_params, 0, sizeof(region_params));
1250 
1251 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1252 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1253 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1254 	region_params.vbios_size = adev->bios_size;
1255 	region_params.fw_bss_data = region_params.bss_data_size ?
1256 		adev->dm.dmub_fw->data +
1257 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1258 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1259 	region_params.fw_inst_const =
1260 		adev->dm.dmub_fw->data +
1261 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1262 		PSP_HEADER_BYTES;
1263 
1264 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1265 					   &region_info);
1266 
1267 	if (status != DMUB_STATUS_OK) {
1268 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1269 		return -EINVAL;
1270 	}
1271 
1272 	/*
1273 	 * Allocate a framebuffer based on the total size of all the regions.
1274 	 * TODO: Move this into GART.
1275 	 */
1276 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1277 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1278 				    &adev->dm.dmub_bo_gpu_addr,
1279 				    &adev->dm.dmub_bo_cpu_addr);
1280 	if (r)
1281 		return r;
1282 
1283 	/* Rebase the regions on the framebuffer address. */
1284 	memset(&fb_params, 0, sizeof(fb_params));
1285 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1286 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1287 	fb_params.region_info = &region_info;
1288 
1289 	adev->dm.dmub_fb_info =
1290 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1291 	fb_info = adev->dm.dmub_fb_info;
1292 
1293 	if (!fb_info) {
1294 		DRM_ERROR(
1295 			"Failed to allocate framebuffer info for DMUB service!\n");
1296 		return -ENOMEM;
1297 	}
1298 
1299 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1300 	if (status != DMUB_STATUS_OK) {
1301 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1302 		return -EINVAL;
1303 	}
1304 
1305 	return 0;
1306 }
1307 
1308 static int dm_sw_init(void *handle)
1309 {
1310 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1311 	int r;
1312 
1313 	r = dm_dmub_sw_init(adev);
1314 	if (r)
1315 		return r;
1316 
1317 	return load_dmcu_fw(adev);
1318 }
1319 
1320 static int dm_sw_fini(void *handle)
1321 {
1322 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1323 
1324 	kfree(adev->dm.dmub_fb_info);
1325 	adev->dm.dmub_fb_info = NULL;
1326 
1327 	if (adev->dm.dmub_srv) {
1328 		dmub_srv_destroy(adev->dm.dmub_srv);
1329 		adev->dm.dmub_srv = NULL;
1330 	}
1331 
1332 	release_firmware(adev->dm.dmub_fw);
1333 	adev->dm.dmub_fw = NULL;
1334 
1335 	release_firmware(adev->dm.fw_dmcu);
1336 	adev->dm.fw_dmcu = NULL;
1337 
1338 	return 0;
1339 }
1340 
1341 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1342 {
1343 	struct amdgpu_dm_connector *aconnector;
1344 	struct drm_connector *connector;
1345 	struct drm_connector_list_iter iter;
1346 	int ret = 0;
1347 
1348 	drm_connector_list_iter_begin(dev, &iter);
1349 	drm_for_each_connector_iter(connector, &iter) {
1350 		aconnector = to_amdgpu_dm_connector(connector);
1351 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1352 		    aconnector->mst_mgr.aux) {
1353 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1354 					 aconnector,
1355 					 aconnector->base.base.id);
1356 
1357 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1358 			if (ret < 0) {
1359 				DRM_ERROR("DM_MST: Failed to start MST\n");
1360 				aconnector->dc_link->type =
1361 					dc_connection_single;
1362 				break;
1363 			}
1364 		}
1365 	}
1366 	drm_connector_list_iter_end(&iter);
1367 
1368 	return ret;
1369 }
1370 
1371 static int dm_late_init(void *handle)
1372 {
1373 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1374 
1375 	struct dmcu_iram_parameters params;
1376 	unsigned int linear_lut[16];
1377 	int i;
1378 	struct dmcu *dmcu = NULL;
1379 	bool ret = true;
1380 
1381 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1382 		return detect_mst_link_for_all_connectors(adev->ddev);
1383 
1384 	dmcu = adev->dm.dc->res_pool->dmcu;
1385 
1386 	for (i = 0; i < 16; i++)
1387 		linear_lut[i] = 0xFFFF * i / 15;
1388 
1389 	params.set = 0;
1390 	params.backlight_ramping_start = 0xCCCC;
1391 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1392 	params.backlight_lut_array_size = 16;
1393 	params.backlight_lut_array = linear_lut;
1394 
1395 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1396 	 * 0xFFFF x 0.01 = 0x28F
1397 	 */
1398 	params.min_abm_backlight = 0x28F;
1399 
1400 	/* In the case where abm is implemented on dmcub,
1401 	 * dmcu object will be null.
1402 	 * ABM 2.4 and up are implemented on dmcub.
1403 	 */
1404 	if (dmcu)
1405 		ret = dmcu_load_iram(dmcu, params);
1406 	else if (adev->dm.dc->ctx->dmub_srv)
1407 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1408 
1409 	if (!ret)
1410 		return -EINVAL;
1411 
1412 	return detect_mst_link_for_all_connectors(adev->ddev);
1413 }
1414 
1415 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1416 {
1417 	struct amdgpu_dm_connector *aconnector;
1418 	struct drm_connector *connector;
1419 	struct drm_connector_list_iter iter;
1420 	struct drm_dp_mst_topology_mgr *mgr;
1421 	int ret;
1422 	bool need_hotplug = false;
1423 
1424 	drm_connector_list_iter_begin(dev, &iter);
1425 	drm_for_each_connector_iter(connector, &iter) {
1426 		aconnector = to_amdgpu_dm_connector(connector);
1427 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1428 		    aconnector->mst_port)
1429 			continue;
1430 
1431 		mgr = &aconnector->mst_mgr;
1432 
1433 		if (suspend) {
1434 			drm_dp_mst_topology_mgr_suspend(mgr);
1435 		} else {
1436 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1437 			if (ret < 0) {
1438 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1439 				need_hotplug = true;
1440 			}
1441 		}
1442 	}
1443 	drm_connector_list_iter_end(&iter);
1444 
1445 	if (need_hotplug)
1446 		drm_kms_helper_hotplug_event(dev);
1447 }
1448 
1449 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1450 {
1451 	struct smu_context *smu = &adev->smu;
1452 	int ret = 0;
1453 
1454 	if (!is_support_sw_smu(adev))
1455 		return 0;
1456 
1457 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1458 	 * on window driver dc implementation.
1459 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1460 	 * should be passed to smu during boot up and resume from s3.
1461 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1462 	 * dcn20_resource_construct
1463 	 * then call pplib functions below to pass the settings to smu:
1464 	 * smu_set_watermarks_for_clock_ranges
1465 	 * smu_set_watermarks_table
1466 	 * navi10_set_watermarks_table
1467 	 * smu_write_watermarks_table
1468 	 *
1469 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1470 	 * dc has implemented different flow for window driver:
1471 	 * dc_hardware_init / dc_set_power_state
1472 	 * dcn10_init_hw
1473 	 * notify_wm_ranges
1474 	 * set_wm_ranges
1475 	 * -- Linux
1476 	 * smu_set_watermarks_for_clock_ranges
1477 	 * renoir_set_watermarks_table
1478 	 * smu_write_watermarks_table
1479 	 *
1480 	 * For Linux,
1481 	 * dc_hardware_init -> amdgpu_dm_init
1482 	 * dc_set_power_state --> dm_resume
1483 	 *
1484 	 * therefore, this function apply to navi10/12/14 but not Renoir
1485 	 * *
1486 	 */
1487 	switch(adev->asic_type) {
1488 	case CHIP_NAVI10:
1489 	case CHIP_NAVI14:
1490 	case CHIP_NAVI12:
1491 		break;
1492 	default:
1493 		return 0;
1494 	}
1495 
1496 	ret = smu_write_watermarks_table(smu);
1497 	if (ret) {
1498 		DRM_ERROR("Failed to update WMTABLE!\n");
1499 		return ret;
1500 	}
1501 
1502 	return 0;
1503 }
1504 
1505 /**
1506  * dm_hw_init() - Initialize DC device
1507  * @handle: The base driver device containing the amdgpu_dm device.
1508  *
1509  * Initialize the &struct amdgpu_display_manager device. This involves calling
1510  * the initializers of each DM component, then populating the struct with them.
1511  *
1512  * Although the function implies hardware initialization, both hardware and
1513  * software are initialized here. Splitting them out to their relevant init
1514  * hooks is a future TODO item.
1515  *
1516  * Some notable things that are initialized here:
1517  *
1518  * - Display Core, both software and hardware
1519  * - DC modules that we need (freesync and color management)
1520  * - DRM software states
1521  * - Interrupt sources and handlers
1522  * - Vblank support
1523  * - Debug FS entries, if enabled
1524  */
1525 static int dm_hw_init(void *handle)
1526 {
1527 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1528 	/* Create DAL display manager */
1529 	amdgpu_dm_init(adev);
1530 	amdgpu_dm_hpd_init(adev);
1531 
1532 	return 0;
1533 }
1534 
1535 /**
1536  * dm_hw_fini() - Teardown DC device
1537  * @handle: The base driver device containing the amdgpu_dm device.
1538  *
1539  * Teardown components within &struct amdgpu_display_manager that require
1540  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1541  * were loaded. Also flush IRQ workqueues and disable them.
1542  */
1543 static int dm_hw_fini(void *handle)
1544 {
1545 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1546 
1547 	amdgpu_dm_hpd_fini(adev);
1548 
1549 	amdgpu_dm_irq_fini(adev);
1550 	amdgpu_dm_fini(adev);
1551 	return 0;
1552 }
1553 
1554 
1555 static int dm_enable_vblank(struct drm_crtc *crtc);
1556 static void dm_disable_vblank(struct drm_crtc *crtc);
1557 
1558 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1559 				 struct dc_state *state, bool enable)
1560 {
1561 	enum dc_irq_source irq_source;
1562 	struct amdgpu_crtc *acrtc;
1563 	int rc = -EBUSY;
1564 	int i = 0;
1565 
1566 	for (i = 0; i < state->stream_count; i++) {
1567 		acrtc = get_crtc_by_otg_inst(
1568 				adev, state->stream_status[i].primary_otg_inst);
1569 
1570 		if (acrtc && state->stream_status[i].plane_count != 0) {
1571 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1572 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1573 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1574 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1575 			if (rc)
1576 				DRM_WARN("Failed to %s pflip interrupts\n",
1577 					 enable ? "enable" : "disable");
1578 
1579 			if (enable) {
1580 				rc = dm_enable_vblank(&acrtc->base);
1581 				if (rc)
1582 					DRM_WARN("Failed to enable vblank interrupts\n");
1583 			} else {
1584 				dm_disable_vblank(&acrtc->base);
1585 			}
1586 
1587 		}
1588 	}
1589 
1590 }
1591 
1592 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1593 {
1594 	struct dc_state *context = NULL;
1595 	enum dc_status res = DC_ERROR_UNEXPECTED;
1596 	int i;
1597 	struct dc_stream_state *del_streams[MAX_PIPES];
1598 	int del_streams_count = 0;
1599 
1600 	memset(del_streams, 0, sizeof(del_streams));
1601 
1602 	context = dc_create_state(dc);
1603 	if (context == NULL)
1604 		goto context_alloc_fail;
1605 
1606 	dc_resource_state_copy_construct_current(dc, context);
1607 
1608 	/* First remove from context all streams */
1609 	for (i = 0; i < context->stream_count; i++) {
1610 		struct dc_stream_state *stream = context->streams[i];
1611 
1612 		del_streams[del_streams_count++] = stream;
1613 	}
1614 
1615 	/* Remove all planes for removed streams and then remove the streams */
1616 	for (i = 0; i < del_streams_count; i++) {
1617 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1618 			res = DC_FAIL_DETACH_SURFACES;
1619 			goto fail;
1620 		}
1621 
1622 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1623 		if (res != DC_OK)
1624 			goto fail;
1625 	}
1626 
1627 
1628 	res = dc_validate_global_state(dc, context, false);
1629 
1630 	if (res != DC_OK) {
1631 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1632 		goto fail;
1633 	}
1634 
1635 	res = dc_commit_state(dc, context);
1636 
1637 fail:
1638 	dc_release_state(context);
1639 
1640 context_alloc_fail:
1641 	return res;
1642 }
1643 
1644 static int dm_suspend(void *handle)
1645 {
1646 	struct amdgpu_device *adev = handle;
1647 	struct amdgpu_display_manager *dm = &adev->dm;
1648 	int ret = 0;
1649 
1650 	if (adev->in_gpu_reset) {
1651 		mutex_lock(&dm->dc_lock);
1652 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1653 
1654 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1655 
1656 		amdgpu_dm_commit_zero_streams(dm->dc);
1657 
1658 		amdgpu_dm_irq_suspend(adev);
1659 
1660 		return ret;
1661 	}
1662 
1663 	WARN_ON(adev->dm.cached_state);
1664 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1665 
1666 	s3_handle_mst(adev->ddev, true);
1667 
1668 	amdgpu_dm_irq_suspend(adev);
1669 
1670 
1671 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1672 
1673 	return 0;
1674 }
1675 
1676 static struct amdgpu_dm_connector *
1677 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1678 					     struct drm_crtc *crtc)
1679 {
1680 	uint32_t i;
1681 	struct drm_connector_state *new_con_state;
1682 	struct drm_connector *connector;
1683 	struct drm_crtc *crtc_from_state;
1684 
1685 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1686 		crtc_from_state = new_con_state->crtc;
1687 
1688 		if (crtc_from_state == crtc)
1689 			return to_amdgpu_dm_connector(connector);
1690 	}
1691 
1692 	return NULL;
1693 }
1694 
1695 static void emulated_link_detect(struct dc_link *link)
1696 {
1697 	struct dc_sink_init_data sink_init_data = { 0 };
1698 	struct display_sink_capability sink_caps = { 0 };
1699 	enum dc_edid_status edid_status;
1700 	struct dc_context *dc_ctx = link->ctx;
1701 	struct dc_sink *sink = NULL;
1702 	struct dc_sink *prev_sink = NULL;
1703 
1704 	link->type = dc_connection_none;
1705 	prev_sink = link->local_sink;
1706 
1707 	if (prev_sink != NULL)
1708 		dc_sink_retain(prev_sink);
1709 
1710 	switch (link->connector_signal) {
1711 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1712 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1713 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1714 		break;
1715 	}
1716 
1717 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1718 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1719 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1720 		break;
1721 	}
1722 
1723 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1724 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1725 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1726 		break;
1727 	}
1728 
1729 	case SIGNAL_TYPE_LVDS: {
1730 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1731 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1732 		break;
1733 	}
1734 
1735 	case SIGNAL_TYPE_EDP: {
1736 		sink_caps.transaction_type =
1737 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1738 		sink_caps.signal = SIGNAL_TYPE_EDP;
1739 		break;
1740 	}
1741 
1742 	case SIGNAL_TYPE_DISPLAY_PORT: {
1743 		sink_caps.transaction_type =
1744 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1745 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1746 		break;
1747 	}
1748 
1749 	default:
1750 		DC_ERROR("Invalid connector type! signal:%d\n",
1751 			link->connector_signal);
1752 		return;
1753 	}
1754 
1755 	sink_init_data.link = link;
1756 	sink_init_data.sink_signal = sink_caps.signal;
1757 
1758 	sink = dc_sink_create(&sink_init_data);
1759 	if (!sink) {
1760 		DC_ERROR("Failed to create sink!\n");
1761 		return;
1762 	}
1763 
1764 	/* dc_sink_create returns a new reference */
1765 	link->local_sink = sink;
1766 
1767 	edid_status = dm_helpers_read_local_edid(
1768 			link->ctx,
1769 			link,
1770 			sink);
1771 
1772 	if (edid_status != EDID_OK)
1773 		DC_ERROR("Failed to read EDID");
1774 
1775 }
1776 
1777 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1778 				     struct amdgpu_display_manager *dm)
1779 {
1780 	struct {
1781 		struct dc_surface_update surface_updates[MAX_SURFACES];
1782 		struct dc_plane_info plane_infos[MAX_SURFACES];
1783 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1784 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1785 		struct dc_stream_update stream_update;
1786 	} * bundle;
1787 	int k, m;
1788 
1789 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1790 
1791 	if (!bundle) {
1792 		dm_error("Failed to allocate update bundle\n");
1793 		goto cleanup;
1794 	}
1795 
1796 	for (k = 0; k < dc_state->stream_count; k++) {
1797 		bundle->stream_update.stream = dc_state->streams[k];
1798 
1799 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1800 			bundle->surface_updates[m].surface =
1801 				dc_state->stream_status->plane_states[m];
1802 			bundle->surface_updates[m].surface->force_full_update =
1803 				true;
1804 		}
1805 		dc_commit_updates_for_stream(
1806 			dm->dc, bundle->surface_updates,
1807 			dc_state->stream_status->plane_count,
1808 			dc_state->streams[k], &bundle->stream_update, dc_state);
1809 	}
1810 
1811 cleanup:
1812 	kfree(bundle);
1813 
1814 	return;
1815 }
1816 
1817 static int dm_resume(void *handle)
1818 {
1819 	struct amdgpu_device *adev = handle;
1820 	struct drm_device *ddev = adev->ddev;
1821 	struct amdgpu_display_manager *dm = &adev->dm;
1822 	struct amdgpu_dm_connector *aconnector;
1823 	struct drm_connector *connector;
1824 	struct drm_connector_list_iter iter;
1825 	struct drm_crtc *crtc;
1826 	struct drm_crtc_state *new_crtc_state;
1827 	struct dm_crtc_state *dm_new_crtc_state;
1828 	struct drm_plane *plane;
1829 	struct drm_plane_state *new_plane_state;
1830 	struct dm_plane_state *dm_new_plane_state;
1831 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1832 	enum dc_connection_type new_connection_type = dc_connection_none;
1833 	struct dc_state *dc_state;
1834 	int i, r, j;
1835 
1836 	if (adev->in_gpu_reset) {
1837 		dc_state = dm->cached_dc_state;
1838 
1839 		r = dm_dmub_hw_init(adev);
1840 		if (r)
1841 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1842 
1843 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1844 		dc_resume(dm->dc);
1845 
1846 		amdgpu_dm_irq_resume_early(adev);
1847 
1848 		for (i = 0; i < dc_state->stream_count; i++) {
1849 			dc_state->streams[i]->mode_changed = true;
1850 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1851 				dc_state->stream_status->plane_states[j]->update_flags.raw
1852 					= 0xffffffff;
1853 			}
1854 		}
1855 
1856 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1857 
1858 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1859 
1860 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1861 
1862 		dc_release_state(dm->cached_dc_state);
1863 		dm->cached_dc_state = NULL;
1864 
1865 		amdgpu_dm_irq_resume_late(adev);
1866 
1867 		mutex_unlock(&dm->dc_lock);
1868 
1869 		return 0;
1870 	}
1871 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1872 	dc_release_state(dm_state->context);
1873 	dm_state->context = dc_create_state(dm->dc);
1874 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1875 	dc_resource_state_construct(dm->dc, dm_state->context);
1876 
1877 	/* Before powering on DC we need to re-initialize DMUB. */
1878 	r = dm_dmub_hw_init(adev);
1879 	if (r)
1880 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1881 
1882 	/* power on hardware */
1883 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1884 
1885 	/* program HPD filter */
1886 	dc_resume(dm->dc);
1887 
1888 	/*
1889 	 * early enable HPD Rx IRQ, should be done before set mode as short
1890 	 * pulse interrupts are used for MST
1891 	 */
1892 	amdgpu_dm_irq_resume_early(adev);
1893 
1894 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1895 	s3_handle_mst(ddev, false);
1896 
1897 	/* Do detection*/
1898 	drm_connector_list_iter_begin(ddev, &iter);
1899 	drm_for_each_connector_iter(connector, &iter) {
1900 		aconnector = to_amdgpu_dm_connector(connector);
1901 
1902 		/*
1903 		 * this is the case when traversing through already created
1904 		 * MST connectors, should be skipped
1905 		 */
1906 		if (aconnector->mst_port)
1907 			continue;
1908 
1909 		mutex_lock(&aconnector->hpd_lock);
1910 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1911 			DRM_ERROR("KMS: Failed to detect connector\n");
1912 
1913 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1914 			emulated_link_detect(aconnector->dc_link);
1915 		else
1916 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1917 
1918 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1919 			aconnector->fake_enable = false;
1920 
1921 		if (aconnector->dc_sink)
1922 			dc_sink_release(aconnector->dc_sink);
1923 		aconnector->dc_sink = NULL;
1924 		amdgpu_dm_update_connector_after_detect(aconnector);
1925 		mutex_unlock(&aconnector->hpd_lock);
1926 	}
1927 	drm_connector_list_iter_end(&iter);
1928 
1929 	/* Force mode set in atomic commit */
1930 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1931 		new_crtc_state->active_changed = true;
1932 
1933 	/*
1934 	 * atomic_check is expected to create the dc states. We need to release
1935 	 * them here, since they were duplicated as part of the suspend
1936 	 * procedure.
1937 	 */
1938 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1939 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1940 		if (dm_new_crtc_state->stream) {
1941 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1942 			dc_stream_release(dm_new_crtc_state->stream);
1943 			dm_new_crtc_state->stream = NULL;
1944 		}
1945 	}
1946 
1947 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1948 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1949 		if (dm_new_plane_state->dc_state) {
1950 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1951 			dc_plane_state_release(dm_new_plane_state->dc_state);
1952 			dm_new_plane_state->dc_state = NULL;
1953 		}
1954 	}
1955 
1956 	drm_atomic_helper_resume(ddev, dm->cached_state);
1957 
1958 	dm->cached_state = NULL;
1959 
1960 	amdgpu_dm_irq_resume_late(adev);
1961 
1962 	amdgpu_dm_smu_write_watermarks_table(adev);
1963 
1964 	return 0;
1965 }
1966 
1967 /**
1968  * DOC: DM Lifecycle
1969  *
1970  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1971  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1972  * the base driver's device list to be initialized and torn down accordingly.
1973  *
1974  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1975  */
1976 
1977 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1978 	.name = "dm",
1979 	.early_init = dm_early_init,
1980 	.late_init = dm_late_init,
1981 	.sw_init = dm_sw_init,
1982 	.sw_fini = dm_sw_fini,
1983 	.hw_init = dm_hw_init,
1984 	.hw_fini = dm_hw_fini,
1985 	.suspend = dm_suspend,
1986 	.resume = dm_resume,
1987 	.is_idle = dm_is_idle,
1988 	.wait_for_idle = dm_wait_for_idle,
1989 	.check_soft_reset = dm_check_soft_reset,
1990 	.soft_reset = dm_soft_reset,
1991 	.set_clockgating_state = dm_set_clockgating_state,
1992 	.set_powergating_state = dm_set_powergating_state,
1993 };
1994 
1995 const struct amdgpu_ip_block_version dm_ip_block =
1996 {
1997 	.type = AMD_IP_BLOCK_TYPE_DCE,
1998 	.major = 1,
1999 	.minor = 0,
2000 	.rev = 0,
2001 	.funcs = &amdgpu_dm_funcs,
2002 };
2003 
2004 
2005 /**
2006  * DOC: atomic
2007  *
2008  * *WIP*
2009  */
2010 
2011 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2012 	.fb_create = amdgpu_display_user_framebuffer_create,
2013 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2014 	.atomic_check = amdgpu_dm_atomic_check,
2015 	.atomic_commit = amdgpu_dm_atomic_commit,
2016 };
2017 
2018 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2019 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2020 };
2021 
2022 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2023 {
2024 	u32 max_cll, min_cll, max, min, q, r;
2025 	struct amdgpu_dm_backlight_caps *caps;
2026 	struct amdgpu_display_manager *dm;
2027 	struct drm_connector *conn_base;
2028 	struct amdgpu_device *adev;
2029 	struct dc_link *link = NULL;
2030 	static const u8 pre_computed_values[] = {
2031 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2032 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2033 
2034 	if (!aconnector || !aconnector->dc_link)
2035 		return;
2036 
2037 	link = aconnector->dc_link;
2038 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2039 		return;
2040 
2041 	conn_base = &aconnector->base;
2042 	adev = conn_base->dev->dev_private;
2043 	dm = &adev->dm;
2044 	caps = &dm->backlight_caps;
2045 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2046 	caps->aux_support = false;
2047 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2048 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2049 
2050 	if (caps->ext_caps->bits.oled == 1 ||
2051 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2052 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2053 		caps->aux_support = true;
2054 
2055 	/* From the specification (CTA-861-G), for calculating the maximum
2056 	 * luminance we need to use:
2057 	 *	Luminance = 50*2**(CV/32)
2058 	 * Where CV is a one-byte value.
2059 	 * For calculating this expression we may need float point precision;
2060 	 * to avoid this complexity level, we take advantage that CV is divided
2061 	 * by a constant. From the Euclids division algorithm, we know that CV
2062 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2063 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2064 	 * need to pre-compute the value of r/32. For pre-computing the values
2065 	 * We just used the following Ruby line:
2066 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2067 	 * The results of the above expressions can be verified at
2068 	 * pre_computed_values.
2069 	 */
2070 	q = max_cll >> 5;
2071 	r = max_cll % 32;
2072 	max = (1 << q) * pre_computed_values[r];
2073 
2074 	// min luminance: maxLum * (CV/255)^2 / 100
2075 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2076 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2077 
2078 	caps->aux_max_input_signal = max;
2079 	caps->aux_min_input_signal = min;
2080 }
2081 
2082 void amdgpu_dm_update_connector_after_detect(
2083 		struct amdgpu_dm_connector *aconnector)
2084 {
2085 	struct drm_connector *connector = &aconnector->base;
2086 	struct drm_device *dev = connector->dev;
2087 	struct dc_sink *sink;
2088 
2089 	/* MST handled by drm_mst framework */
2090 	if (aconnector->mst_mgr.mst_state == true)
2091 		return;
2092 
2093 
2094 	sink = aconnector->dc_link->local_sink;
2095 	if (sink)
2096 		dc_sink_retain(sink);
2097 
2098 	/*
2099 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2100 	 * the connector sink is set to either fake or physical sink depends on link status.
2101 	 * Skip if already done during boot.
2102 	 */
2103 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2104 			&& aconnector->dc_em_sink) {
2105 
2106 		/*
2107 		 * For S3 resume with headless use eml_sink to fake stream
2108 		 * because on resume connector->sink is set to NULL
2109 		 */
2110 		mutex_lock(&dev->mode_config.mutex);
2111 
2112 		if (sink) {
2113 			if (aconnector->dc_sink) {
2114 				amdgpu_dm_update_freesync_caps(connector, NULL);
2115 				/*
2116 				 * retain and release below are used to
2117 				 * bump up refcount for sink because the link doesn't point
2118 				 * to it anymore after disconnect, so on next crtc to connector
2119 				 * reshuffle by UMD we will get into unwanted dc_sink release
2120 				 */
2121 				dc_sink_release(aconnector->dc_sink);
2122 			}
2123 			aconnector->dc_sink = sink;
2124 			dc_sink_retain(aconnector->dc_sink);
2125 			amdgpu_dm_update_freesync_caps(connector,
2126 					aconnector->edid);
2127 		} else {
2128 			amdgpu_dm_update_freesync_caps(connector, NULL);
2129 			if (!aconnector->dc_sink) {
2130 				aconnector->dc_sink = aconnector->dc_em_sink;
2131 				dc_sink_retain(aconnector->dc_sink);
2132 			}
2133 		}
2134 
2135 		mutex_unlock(&dev->mode_config.mutex);
2136 
2137 		if (sink)
2138 			dc_sink_release(sink);
2139 		return;
2140 	}
2141 
2142 	/*
2143 	 * TODO: temporary guard to look for proper fix
2144 	 * if this sink is MST sink, we should not do anything
2145 	 */
2146 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2147 		dc_sink_release(sink);
2148 		return;
2149 	}
2150 
2151 	if (aconnector->dc_sink == sink) {
2152 		/*
2153 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2154 		 * Do nothing!!
2155 		 */
2156 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2157 				aconnector->connector_id);
2158 		if (sink)
2159 			dc_sink_release(sink);
2160 		return;
2161 	}
2162 
2163 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2164 		aconnector->connector_id, aconnector->dc_sink, sink);
2165 
2166 	mutex_lock(&dev->mode_config.mutex);
2167 
2168 	/*
2169 	 * 1. Update status of the drm connector
2170 	 * 2. Send an event and let userspace tell us what to do
2171 	 */
2172 	if (sink) {
2173 		/*
2174 		 * TODO: check if we still need the S3 mode update workaround.
2175 		 * If yes, put it here.
2176 		 */
2177 		if (aconnector->dc_sink)
2178 			amdgpu_dm_update_freesync_caps(connector, NULL);
2179 
2180 		aconnector->dc_sink = sink;
2181 		dc_sink_retain(aconnector->dc_sink);
2182 		if (sink->dc_edid.length == 0) {
2183 			aconnector->edid = NULL;
2184 			if (aconnector->dc_link->aux_mode) {
2185 				drm_dp_cec_unset_edid(
2186 					&aconnector->dm_dp_aux.aux);
2187 			}
2188 		} else {
2189 			aconnector->edid =
2190 				(struct edid *)sink->dc_edid.raw_edid;
2191 
2192 			drm_connector_update_edid_property(connector,
2193 							   aconnector->edid);
2194 
2195 			if (aconnector->dc_link->aux_mode)
2196 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2197 						    aconnector->edid);
2198 		}
2199 
2200 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2201 		update_connector_ext_caps(aconnector);
2202 	} else {
2203 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2204 		amdgpu_dm_update_freesync_caps(connector, NULL);
2205 		drm_connector_update_edid_property(connector, NULL);
2206 		aconnector->num_modes = 0;
2207 		dc_sink_release(aconnector->dc_sink);
2208 		aconnector->dc_sink = NULL;
2209 		aconnector->edid = NULL;
2210 #ifdef CONFIG_DRM_AMD_DC_HDCP
2211 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2212 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2213 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2214 #endif
2215 	}
2216 
2217 	mutex_unlock(&dev->mode_config.mutex);
2218 
2219 	if (sink)
2220 		dc_sink_release(sink);
2221 }
2222 
2223 static void handle_hpd_irq(void *param)
2224 {
2225 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2226 	struct drm_connector *connector = &aconnector->base;
2227 	struct drm_device *dev = connector->dev;
2228 	enum dc_connection_type new_connection_type = dc_connection_none;
2229 #ifdef CONFIG_DRM_AMD_DC_HDCP
2230 	struct amdgpu_device *adev = dev->dev_private;
2231 #endif
2232 
2233 	/*
2234 	 * In case of failure or MST no need to update connector status or notify the OS
2235 	 * since (for MST case) MST does this in its own context.
2236 	 */
2237 	mutex_lock(&aconnector->hpd_lock);
2238 
2239 #ifdef CONFIG_DRM_AMD_DC_HDCP
2240 	if (adev->dm.hdcp_workqueue)
2241 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2242 #endif
2243 	if (aconnector->fake_enable)
2244 		aconnector->fake_enable = false;
2245 
2246 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2247 		DRM_ERROR("KMS: Failed to detect connector\n");
2248 
2249 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2250 		emulated_link_detect(aconnector->dc_link);
2251 
2252 
2253 		drm_modeset_lock_all(dev);
2254 		dm_restore_drm_connector_state(dev, connector);
2255 		drm_modeset_unlock_all(dev);
2256 
2257 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2258 			drm_kms_helper_hotplug_event(dev);
2259 
2260 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2261 		amdgpu_dm_update_connector_after_detect(aconnector);
2262 
2263 
2264 		drm_modeset_lock_all(dev);
2265 		dm_restore_drm_connector_state(dev, connector);
2266 		drm_modeset_unlock_all(dev);
2267 
2268 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2269 			drm_kms_helper_hotplug_event(dev);
2270 	}
2271 	mutex_unlock(&aconnector->hpd_lock);
2272 
2273 }
2274 
2275 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2276 {
2277 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2278 	uint8_t dret;
2279 	bool new_irq_handled = false;
2280 	int dpcd_addr;
2281 	int dpcd_bytes_to_read;
2282 
2283 	const int max_process_count = 30;
2284 	int process_count = 0;
2285 
2286 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2287 
2288 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2289 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2290 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2291 		dpcd_addr = DP_SINK_COUNT;
2292 	} else {
2293 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2294 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2295 		dpcd_addr = DP_SINK_COUNT_ESI;
2296 	}
2297 
2298 	dret = drm_dp_dpcd_read(
2299 		&aconnector->dm_dp_aux.aux,
2300 		dpcd_addr,
2301 		esi,
2302 		dpcd_bytes_to_read);
2303 
2304 	while (dret == dpcd_bytes_to_read &&
2305 		process_count < max_process_count) {
2306 		uint8_t retry;
2307 		dret = 0;
2308 
2309 		process_count++;
2310 
2311 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2312 		/* handle HPD short pulse irq */
2313 		if (aconnector->mst_mgr.mst_state)
2314 			drm_dp_mst_hpd_irq(
2315 				&aconnector->mst_mgr,
2316 				esi,
2317 				&new_irq_handled);
2318 
2319 		if (new_irq_handled) {
2320 			/* ACK at DPCD to notify down stream */
2321 			const int ack_dpcd_bytes_to_write =
2322 				dpcd_bytes_to_read - 1;
2323 
2324 			for (retry = 0; retry < 3; retry++) {
2325 				uint8_t wret;
2326 
2327 				wret = drm_dp_dpcd_write(
2328 					&aconnector->dm_dp_aux.aux,
2329 					dpcd_addr + 1,
2330 					&esi[1],
2331 					ack_dpcd_bytes_to_write);
2332 				if (wret == ack_dpcd_bytes_to_write)
2333 					break;
2334 			}
2335 
2336 			/* check if there is new irq to be handled */
2337 			dret = drm_dp_dpcd_read(
2338 				&aconnector->dm_dp_aux.aux,
2339 				dpcd_addr,
2340 				esi,
2341 				dpcd_bytes_to_read);
2342 
2343 			new_irq_handled = false;
2344 		} else {
2345 			break;
2346 		}
2347 	}
2348 
2349 	if (process_count == max_process_count)
2350 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2351 }
2352 
2353 static void handle_hpd_rx_irq(void *param)
2354 {
2355 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2356 	struct drm_connector *connector = &aconnector->base;
2357 	struct drm_device *dev = connector->dev;
2358 	struct dc_link *dc_link = aconnector->dc_link;
2359 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2360 	enum dc_connection_type new_connection_type = dc_connection_none;
2361 #ifdef CONFIG_DRM_AMD_DC_HDCP
2362 	union hpd_irq_data hpd_irq_data;
2363 	struct amdgpu_device *adev = dev->dev_private;
2364 
2365 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2366 #endif
2367 
2368 	/*
2369 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2370 	 * conflict, after implement i2c helper, this mutex should be
2371 	 * retired.
2372 	 */
2373 	if (dc_link->type != dc_connection_mst_branch)
2374 		mutex_lock(&aconnector->hpd_lock);
2375 
2376 
2377 #ifdef CONFIG_DRM_AMD_DC_HDCP
2378 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2379 #else
2380 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2381 #endif
2382 			!is_mst_root_connector) {
2383 		/* Downstream Port status changed. */
2384 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2385 			DRM_ERROR("KMS: Failed to detect connector\n");
2386 
2387 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2388 			emulated_link_detect(dc_link);
2389 
2390 			if (aconnector->fake_enable)
2391 				aconnector->fake_enable = false;
2392 
2393 			amdgpu_dm_update_connector_after_detect(aconnector);
2394 
2395 
2396 			drm_modeset_lock_all(dev);
2397 			dm_restore_drm_connector_state(dev, connector);
2398 			drm_modeset_unlock_all(dev);
2399 
2400 			drm_kms_helper_hotplug_event(dev);
2401 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2402 
2403 			if (aconnector->fake_enable)
2404 				aconnector->fake_enable = false;
2405 
2406 			amdgpu_dm_update_connector_after_detect(aconnector);
2407 
2408 
2409 			drm_modeset_lock_all(dev);
2410 			dm_restore_drm_connector_state(dev, connector);
2411 			drm_modeset_unlock_all(dev);
2412 
2413 			drm_kms_helper_hotplug_event(dev);
2414 		}
2415 	}
2416 #ifdef CONFIG_DRM_AMD_DC_HDCP
2417 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2418 		if (adev->dm.hdcp_workqueue)
2419 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2420 	}
2421 #endif
2422 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2423 	    (dc_link->type == dc_connection_mst_branch))
2424 		dm_handle_hpd_rx_irq(aconnector);
2425 
2426 	if (dc_link->type != dc_connection_mst_branch) {
2427 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2428 		mutex_unlock(&aconnector->hpd_lock);
2429 	}
2430 }
2431 
2432 static void register_hpd_handlers(struct amdgpu_device *adev)
2433 {
2434 	struct drm_device *dev = adev->ddev;
2435 	struct drm_connector *connector;
2436 	struct amdgpu_dm_connector *aconnector;
2437 	const struct dc_link *dc_link;
2438 	struct dc_interrupt_params int_params = {0};
2439 
2440 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2441 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2442 
2443 	list_for_each_entry(connector,
2444 			&dev->mode_config.connector_list, head)	{
2445 
2446 		aconnector = to_amdgpu_dm_connector(connector);
2447 		dc_link = aconnector->dc_link;
2448 
2449 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2450 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2451 			int_params.irq_source = dc_link->irq_source_hpd;
2452 
2453 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2454 					handle_hpd_irq,
2455 					(void *) aconnector);
2456 		}
2457 
2458 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2459 
2460 			/* Also register for DP short pulse (hpd_rx). */
2461 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2462 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2463 
2464 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2465 					handle_hpd_rx_irq,
2466 					(void *) aconnector);
2467 		}
2468 	}
2469 }
2470 
2471 /* Register IRQ sources and initialize IRQ callbacks */
2472 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2473 {
2474 	struct dc *dc = adev->dm.dc;
2475 	struct common_irq_params *c_irq_params;
2476 	struct dc_interrupt_params int_params = {0};
2477 	int r;
2478 	int i;
2479 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2480 
2481 	if (adev->asic_type >= CHIP_VEGA10)
2482 		client_id = SOC15_IH_CLIENTID_DCE;
2483 
2484 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2485 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2486 
2487 	/*
2488 	 * Actions of amdgpu_irq_add_id():
2489 	 * 1. Register a set() function with base driver.
2490 	 *    Base driver will call set() function to enable/disable an
2491 	 *    interrupt in DC hardware.
2492 	 * 2. Register amdgpu_dm_irq_handler().
2493 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2494 	 *    coming from DC hardware.
2495 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2496 	 *    for acknowledging and handling. */
2497 
2498 	/* Use VBLANK interrupt */
2499 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2500 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2501 		if (r) {
2502 			DRM_ERROR("Failed to add crtc irq id!\n");
2503 			return r;
2504 		}
2505 
2506 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2507 		int_params.irq_source =
2508 			dc_interrupt_to_irq_source(dc, i, 0);
2509 
2510 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2511 
2512 		c_irq_params->adev = adev;
2513 		c_irq_params->irq_src = int_params.irq_source;
2514 
2515 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2516 				dm_crtc_high_irq, c_irq_params);
2517 	}
2518 
2519 	/* Use VUPDATE interrupt */
2520 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2521 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2522 		if (r) {
2523 			DRM_ERROR("Failed to add vupdate irq id!\n");
2524 			return r;
2525 		}
2526 
2527 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2528 		int_params.irq_source =
2529 			dc_interrupt_to_irq_source(dc, i, 0);
2530 
2531 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2532 
2533 		c_irq_params->adev = adev;
2534 		c_irq_params->irq_src = int_params.irq_source;
2535 
2536 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2537 				dm_vupdate_high_irq, c_irq_params);
2538 	}
2539 
2540 	/* Use GRPH_PFLIP interrupt */
2541 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2542 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2543 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2544 		if (r) {
2545 			DRM_ERROR("Failed to add page flip irq id!\n");
2546 			return r;
2547 		}
2548 
2549 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2550 		int_params.irq_source =
2551 			dc_interrupt_to_irq_source(dc, i, 0);
2552 
2553 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2554 
2555 		c_irq_params->adev = adev;
2556 		c_irq_params->irq_src = int_params.irq_source;
2557 
2558 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2559 				dm_pflip_high_irq, c_irq_params);
2560 
2561 	}
2562 
2563 	/* HPD */
2564 	r = amdgpu_irq_add_id(adev, client_id,
2565 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2566 	if (r) {
2567 		DRM_ERROR("Failed to add hpd irq id!\n");
2568 		return r;
2569 	}
2570 
2571 	register_hpd_handlers(adev);
2572 
2573 	return 0;
2574 }
2575 
2576 #if defined(CONFIG_DRM_AMD_DC_DCN)
2577 /* Register IRQ sources and initialize IRQ callbacks */
2578 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2579 {
2580 	struct dc *dc = adev->dm.dc;
2581 	struct common_irq_params *c_irq_params;
2582 	struct dc_interrupt_params int_params = {0};
2583 	int r;
2584 	int i;
2585 
2586 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2587 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2588 
2589 	/*
2590 	 * Actions of amdgpu_irq_add_id():
2591 	 * 1. Register a set() function with base driver.
2592 	 *    Base driver will call set() function to enable/disable an
2593 	 *    interrupt in DC hardware.
2594 	 * 2. Register amdgpu_dm_irq_handler().
2595 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2596 	 *    coming from DC hardware.
2597 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2598 	 *    for acknowledging and handling.
2599 	 */
2600 
2601 	/* Use VSTARTUP interrupt */
2602 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2603 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2604 			i++) {
2605 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2606 
2607 		if (r) {
2608 			DRM_ERROR("Failed to add crtc irq id!\n");
2609 			return r;
2610 		}
2611 
2612 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2613 		int_params.irq_source =
2614 			dc_interrupt_to_irq_source(dc, i, 0);
2615 
2616 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2617 
2618 		c_irq_params->adev = adev;
2619 		c_irq_params->irq_src = int_params.irq_source;
2620 
2621 		amdgpu_dm_irq_register_interrupt(
2622 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2623 	}
2624 
2625 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2626 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2627 	 * to trigger at end of each vblank, regardless of state of the lock,
2628 	 * matching DCE behaviour.
2629 	 */
2630 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2631 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2632 	     i++) {
2633 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2634 
2635 		if (r) {
2636 			DRM_ERROR("Failed to add vupdate irq id!\n");
2637 			return r;
2638 		}
2639 
2640 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2641 		int_params.irq_source =
2642 			dc_interrupt_to_irq_source(dc, i, 0);
2643 
2644 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2645 
2646 		c_irq_params->adev = adev;
2647 		c_irq_params->irq_src = int_params.irq_source;
2648 
2649 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2650 				dm_vupdate_high_irq, c_irq_params);
2651 	}
2652 
2653 	/* Use GRPH_PFLIP interrupt */
2654 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2655 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2656 			i++) {
2657 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2658 		if (r) {
2659 			DRM_ERROR("Failed to add page flip irq id!\n");
2660 			return r;
2661 		}
2662 
2663 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2664 		int_params.irq_source =
2665 			dc_interrupt_to_irq_source(dc, i, 0);
2666 
2667 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2668 
2669 		c_irq_params->adev = adev;
2670 		c_irq_params->irq_src = int_params.irq_source;
2671 
2672 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2673 				dm_pflip_high_irq, c_irq_params);
2674 
2675 	}
2676 
2677 	/* HPD */
2678 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2679 			&adev->hpd_irq);
2680 	if (r) {
2681 		DRM_ERROR("Failed to add hpd irq id!\n");
2682 		return r;
2683 	}
2684 
2685 	register_hpd_handlers(adev);
2686 
2687 	return 0;
2688 }
2689 #endif
2690 
2691 /*
2692  * Acquires the lock for the atomic state object and returns
2693  * the new atomic state.
2694  *
2695  * This should only be called during atomic check.
2696  */
2697 static int dm_atomic_get_state(struct drm_atomic_state *state,
2698 			       struct dm_atomic_state **dm_state)
2699 {
2700 	struct drm_device *dev = state->dev;
2701 	struct amdgpu_device *adev = dev->dev_private;
2702 	struct amdgpu_display_manager *dm = &adev->dm;
2703 	struct drm_private_state *priv_state;
2704 
2705 	if (*dm_state)
2706 		return 0;
2707 
2708 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2709 	if (IS_ERR(priv_state))
2710 		return PTR_ERR(priv_state);
2711 
2712 	*dm_state = to_dm_atomic_state(priv_state);
2713 
2714 	return 0;
2715 }
2716 
2717 static struct dm_atomic_state *
2718 dm_atomic_get_new_state(struct drm_atomic_state *state)
2719 {
2720 	struct drm_device *dev = state->dev;
2721 	struct amdgpu_device *adev = dev->dev_private;
2722 	struct amdgpu_display_manager *dm = &adev->dm;
2723 	struct drm_private_obj *obj;
2724 	struct drm_private_state *new_obj_state;
2725 	int i;
2726 
2727 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2728 		if (obj->funcs == dm->atomic_obj.funcs)
2729 			return to_dm_atomic_state(new_obj_state);
2730 	}
2731 
2732 	return NULL;
2733 }
2734 
2735 static struct dm_atomic_state *
2736 dm_atomic_get_old_state(struct drm_atomic_state *state)
2737 {
2738 	struct drm_device *dev = state->dev;
2739 	struct amdgpu_device *adev = dev->dev_private;
2740 	struct amdgpu_display_manager *dm = &adev->dm;
2741 	struct drm_private_obj *obj;
2742 	struct drm_private_state *old_obj_state;
2743 	int i;
2744 
2745 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2746 		if (obj->funcs == dm->atomic_obj.funcs)
2747 			return to_dm_atomic_state(old_obj_state);
2748 	}
2749 
2750 	return NULL;
2751 }
2752 
2753 static struct drm_private_state *
2754 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2755 {
2756 	struct dm_atomic_state *old_state, *new_state;
2757 
2758 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2759 	if (!new_state)
2760 		return NULL;
2761 
2762 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2763 
2764 	old_state = to_dm_atomic_state(obj->state);
2765 
2766 	if (old_state && old_state->context)
2767 		new_state->context = dc_copy_state(old_state->context);
2768 
2769 	if (!new_state->context) {
2770 		kfree(new_state);
2771 		return NULL;
2772 	}
2773 
2774 	return &new_state->base;
2775 }
2776 
2777 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2778 				    struct drm_private_state *state)
2779 {
2780 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2781 
2782 	if (dm_state && dm_state->context)
2783 		dc_release_state(dm_state->context);
2784 
2785 	kfree(dm_state);
2786 }
2787 
2788 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2789 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2790 	.atomic_destroy_state = dm_atomic_destroy_state,
2791 };
2792 
2793 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2794 {
2795 	struct dm_atomic_state *state;
2796 	int r;
2797 
2798 	adev->mode_info.mode_config_initialized = true;
2799 
2800 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2801 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2802 
2803 	adev->ddev->mode_config.max_width = 16384;
2804 	adev->ddev->mode_config.max_height = 16384;
2805 
2806 	adev->ddev->mode_config.preferred_depth = 24;
2807 	adev->ddev->mode_config.prefer_shadow = 1;
2808 	/* indicates support for immediate flip */
2809 	adev->ddev->mode_config.async_page_flip = true;
2810 
2811 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2812 
2813 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2814 	if (!state)
2815 		return -ENOMEM;
2816 
2817 	state->context = dc_create_state(adev->dm.dc);
2818 	if (!state->context) {
2819 		kfree(state);
2820 		return -ENOMEM;
2821 	}
2822 
2823 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2824 
2825 	drm_atomic_private_obj_init(adev->ddev,
2826 				    &adev->dm.atomic_obj,
2827 				    &state->base,
2828 				    &dm_atomic_state_funcs);
2829 
2830 	r = amdgpu_display_modeset_create_props(adev);
2831 	if (r)
2832 		return r;
2833 
2834 	r = amdgpu_dm_audio_init(adev);
2835 	if (r)
2836 		return r;
2837 
2838 	return 0;
2839 }
2840 
2841 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2842 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2843 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2844 
2845 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2846 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2847 
2848 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2849 {
2850 #if defined(CONFIG_ACPI)
2851 	struct amdgpu_dm_backlight_caps caps;
2852 
2853 	if (dm->backlight_caps.caps_valid)
2854 		return;
2855 
2856 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2857 	if (caps.caps_valid) {
2858 		dm->backlight_caps.caps_valid = true;
2859 		if (caps.aux_support)
2860 			return;
2861 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2862 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2863 	} else {
2864 		dm->backlight_caps.min_input_signal =
2865 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2866 		dm->backlight_caps.max_input_signal =
2867 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2868 	}
2869 #else
2870 	if (dm->backlight_caps.aux_support)
2871 		return;
2872 
2873 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2874 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2875 #endif
2876 }
2877 
2878 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2879 {
2880 	bool rc;
2881 
2882 	if (!link)
2883 		return 1;
2884 
2885 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2886 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2887 
2888 	return rc ? 0 : 1;
2889 }
2890 
2891 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2892 			      const uint32_t user_brightness)
2893 {
2894 	u32 min, max, conversion_pace;
2895 	u32 brightness = user_brightness;
2896 
2897 	if (!caps)
2898 		goto out;
2899 
2900 	if (!caps->aux_support) {
2901 		max = caps->max_input_signal;
2902 		min = caps->min_input_signal;
2903 		/*
2904 		 * The brightness input is in the range 0-255
2905 		 * It needs to be rescaled to be between the
2906 		 * requested min and max input signal
2907 		 * It also needs to be scaled up by 0x101 to
2908 		 * match the DC interface which has a range of
2909 		 * 0 to 0xffff
2910 		 */
2911 		conversion_pace = 0x101;
2912 		brightness =
2913 			user_brightness
2914 			* conversion_pace
2915 			* (max - min)
2916 			/ AMDGPU_MAX_BL_LEVEL
2917 			+ min * conversion_pace;
2918 	} else {
2919 		/* TODO
2920 		 * We are doing a linear interpolation here, which is OK but
2921 		 * does not provide the optimal result. We probably want
2922 		 * something close to the Perceptual Quantizer (PQ) curve.
2923 		 */
2924 		max = caps->aux_max_input_signal;
2925 		min = caps->aux_min_input_signal;
2926 
2927 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2928 			       + user_brightness * max;
2929 		// Multiple the value by 1000 since we use millinits
2930 		brightness *= 1000;
2931 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2932 	}
2933 
2934 out:
2935 	return brightness;
2936 }
2937 
2938 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2939 {
2940 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2941 	struct amdgpu_dm_backlight_caps caps;
2942 	struct dc_link *link = NULL;
2943 	u32 brightness;
2944 	bool rc;
2945 
2946 	amdgpu_dm_update_backlight_caps(dm);
2947 	caps = dm->backlight_caps;
2948 
2949 	link = (struct dc_link *)dm->backlight_link;
2950 
2951 	brightness = convert_brightness(&caps, bd->props.brightness);
2952 	// Change brightness based on AUX property
2953 	if (caps.aux_support)
2954 		return set_backlight_via_aux(link, brightness);
2955 
2956 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2957 
2958 	return rc ? 0 : 1;
2959 }
2960 
2961 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2962 {
2963 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2964 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2965 
2966 	if (ret == DC_ERROR_UNEXPECTED)
2967 		return bd->props.brightness;
2968 	return ret;
2969 }
2970 
2971 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2972 	.options = BL_CORE_SUSPENDRESUME,
2973 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2974 	.update_status	= amdgpu_dm_backlight_update_status,
2975 };
2976 
2977 static void
2978 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2979 {
2980 	char bl_name[16];
2981 	struct backlight_properties props = { 0 };
2982 
2983 	amdgpu_dm_update_backlight_caps(dm);
2984 
2985 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2986 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2987 	props.type = BACKLIGHT_RAW;
2988 
2989 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2990 			dm->adev->ddev->primary->index);
2991 
2992 	dm->backlight_dev = backlight_device_register(bl_name,
2993 			dm->adev->ddev->dev,
2994 			dm,
2995 			&amdgpu_dm_backlight_ops,
2996 			&props);
2997 
2998 	if (IS_ERR(dm->backlight_dev))
2999 		DRM_ERROR("DM: Backlight registration failed!\n");
3000 	else
3001 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3002 }
3003 
3004 #endif
3005 
3006 static int initialize_plane(struct amdgpu_display_manager *dm,
3007 			    struct amdgpu_mode_info *mode_info, int plane_id,
3008 			    enum drm_plane_type plane_type,
3009 			    const struct dc_plane_cap *plane_cap)
3010 {
3011 	struct drm_plane *plane;
3012 	unsigned long possible_crtcs;
3013 	int ret = 0;
3014 
3015 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3016 	if (!plane) {
3017 		DRM_ERROR("KMS: Failed to allocate plane\n");
3018 		return -ENOMEM;
3019 	}
3020 	plane->type = plane_type;
3021 
3022 	/*
3023 	 * HACK: IGT tests expect that the primary plane for a CRTC
3024 	 * can only have one possible CRTC. Only expose support for
3025 	 * any CRTC if they're not going to be used as a primary plane
3026 	 * for a CRTC - like overlay or underlay planes.
3027 	 */
3028 	possible_crtcs = 1 << plane_id;
3029 	if (plane_id >= dm->dc->caps.max_streams)
3030 		possible_crtcs = 0xff;
3031 
3032 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3033 
3034 	if (ret) {
3035 		DRM_ERROR("KMS: Failed to initialize plane\n");
3036 		kfree(plane);
3037 		return ret;
3038 	}
3039 
3040 	if (mode_info)
3041 		mode_info->planes[plane_id] = plane;
3042 
3043 	return ret;
3044 }
3045 
3046 
3047 static void register_backlight_device(struct amdgpu_display_manager *dm,
3048 				      struct dc_link *link)
3049 {
3050 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3051 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3052 
3053 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3054 	    link->type != dc_connection_none) {
3055 		/*
3056 		 * Event if registration failed, we should continue with
3057 		 * DM initialization because not having a backlight control
3058 		 * is better then a black screen.
3059 		 */
3060 		amdgpu_dm_register_backlight_device(dm);
3061 
3062 		if (dm->backlight_dev)
3063 			dm->backlight_link = link;
3064 	}
3065 #endif
3066 }
3067 
3068 
3069 /*
3070  * In this architecture, the association
3071  * connector -> encoder -> crtc
3072  * id not really requried. The crtc and connector will hold the
3073  * display_index as an abstraction to use with DAL component
3074  *
3075  * Returns 0 on success
3076  */
3077 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3078 {
3079 	struct amdgpu_display_manager *dm = &adev->dm;
3080 	int32_t i;
3081 	struct amdgpu_dm_connector *aconnector = NULL;
3082 	struct amdgpu_encoder *aencoder = NULL;
3083 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3084 	uint32_t link_cnt;
3085 	int32_t primary_planes;
3086 	enum dc_connection_type new_connection_type = dc_connection_none;
3087 	const struct dc_plane_cap *plane;
3088 
3089 	link_cnt = dm->dc->caps.max_links;
3090 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3091 		DRM_ERROR("DM: Failed to initialize mode config\n");
3092 		return -EINVAL;
3093 	}
3094 
3095 	/* There is one primary plane per CRTC */
3096 	primary_planes = dm->dc->caps.max_streams;
3097 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3098 
3099 	/*
3100 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3101 	 * Order is reversed to match iteration order in atomic check.
3102 	 */
3103 	for (i = (primary_planes - 1); i >= 0; i--) {
3104 		plane = &dm->dc->caps.planes[i];
3105 
3106 		if (initialize_plane(dm, mode_info, i,
3107 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3108 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3109 			goto fail;
3110 		}
3111 	}
3112 
3113 	/*
3114 	 * Initialize overlay planes, index starting after primary planes.
3115 	 * These planes have a higher DRM index than the primary planes since
3116 	 * they should be considered as having a higher z-order.
3117 	 * Order is reversed to match iteration order in atomic check.
3118 	 *
3119 	 * Only support DCN for now, and only expose one so we don't encourage
3120 	 * userspace to use up all the pipes.
3121 	 */
3122 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3123 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3124 
3125 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3126 			continue;
3127 
3128 		if (!plane->blends_with_above || !plane->blends_with_below)
3129 			continue;
3130 
3131 		if (!plane->pixel_format_support.argb8888)
3132 			continue;
3133 
3134 		if (initialize_plane(dm, NULL, primary_planes + i,
3135 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3136 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3137 			goto fail;
3138 		}
3139 
3140 		/* Only create one overlay plane. */
3141 		break;
3142 	}
3143 
3144 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3145 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3146 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3147 			goto fail;
3148 		}
3149 
3150 	dm->display_indexes_num = dm->dc->caps.max_streams;
3151 
3152 	/* loops over all connectors on the board */
3153 	for (i = 0; i < link_cnt; i++) {
3154 		struct dc_link *link = NULL;
3155 
3156 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3157 			DRM_ERROR(
3158 				"KMS: Cannot support more than %d display indexes\n",
3159 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3160 			continue;
3161 		}
3162 
3163 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3164 		if (!aconnector)
3165 			goto fail;
3166 
3167 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3168 		if (!aencoder)
3169 			goto fail;
3170 
3171 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3172 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3173 			goto fail;
3174 		}
3175 
3176 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3177 			DRM_ERROR("KMS: Failed to initialize connector\n");
3178 			goto fail;
3179 		}
3180 
3181 		link = dc_get_link_at_index(dm->dc, i);
3182 
3183 		if (!dc_link_detect_sink(link, &new_connection_type))
3184 			DRM_ERROR("KMS: Failed to detect connector\n");
3185 
3186 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3187 			emulated_link_detect(link);
3188 			amdgpu_dm_update_connector_after_detect(aconnector);
3189 
3190 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3191 			amdgpu_dm_update_connector_after_detect(aconnector);
3192 			register_backlight_device(dm, link);
3193 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3194 				amdgpu_dm_set_psr_caps(link);
3195 		}
3196 
3197 
3198 	}
3199 
3200 	/* Software is initialized. Now we can register interrupt handlers. */
3201 	switch (adev->asic_type) {
3202 	case CHIP_BONAIRE:
3203 	case CHIP_HAWAII:
3204 	case CHIP_KAVERI:
3205 	case CHIP_KABINI:
3206 	case CHIP_MULLINS:
3207 	case CHIP_TONGA:
3208 	case CHIP_FIJI:
3209 	case CHIP_CARRIZO:
3210 	case CHIP_STONEY:
3211 	case CHIP_POLARIS11:
3212 	case CHIP_POLARIS10:
3213 	case CHIP_POLARIS12:
3214 	case CHIP_VEGAM:
3215 	case CHIP_VEGA10:
3216 	case CHIP_VEGA12:
3217 	case CHIP_VEGA20:
3218 		if (dce110_register_irq_handlers(dm->adev)) {
3219 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3220 			goto fail;
3221 		}
3222 		break;
3223 #if defined(CONFIG_DRM_AMD_DC_DCN)
3224 	case CHIP_RAVEN:
3225 	case CHIP_NAVI12:
3226 	case CHIP_NAVI10:
3227 	case CHIP_NAVI14:
3228 	case CHIP_RENOIR:
3229 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3230 	case CHIP_SIENNA_CICHLID:
3231 	case CHIP_NAVY_FLOUNDER:
3232 #endif
3233 		if (dcn10_register_irq_handlers(dm->adev)) {
3234 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3235 			goto fail;
3236 		}
3237 		break;
3238 #endif
3239 	default:
3240 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3241 		goto fail;
3242 	}
3243 
3244 	/* No userspace support. */
3245 	dm->dc->debug.disable_tri_buf = true;
3246 
3247 	return 0;
3248 fail:
3249 	kfree(aencoder);
3250 	kfree(aconnector);
3251 
3252 	return -EINVAL;
3253 }
3254 
3255 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3256 {
3257 	drm_mode_config_cleanup(dm->ddev);
3258 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3259 	return;
3260 }
3261 
3262 /******************************************************************************
3263  * amdgpu_display_funcs functions
3264  *****************************************************************************/
3265 
3266 /*
3267  * dm_bandwidth_update - program display watermarks
3268  *
3269  * @adev: amdgpu_device pointer
3270  *
3271  * Calculate and program the display watermarks and line buffer allocation.
3272  */
3273 static void dm_bandwidth_update(struct amdgpu_device *adev)
3274 {
3275 	/* TODO: implement later */
3276 }
3277 
3278 static const struct amdgpu_display_funcs dm_display_funcs = {
3279 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3280 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3281 	.backlight_set_level = NULL, /* never called for DC */
3282 	.backlight_get_level = NULL, /* never called for DC */
3283 	.hpd_sense = NULL,/* called unconditionally */
3284 	.hpd_set_polarity = NULL, /* called unconditionally */
3285 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3286 	.page_flip_get_scanoutpos =
3287 		dm_crtc_get_scanoutpos,/* called unconditionally */
3288 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3289 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3290 };
3291 
3292 #if defined(CONFIG_DEBUG_KERNEL_DC)
3293 
3294 static ssize_t s3_debug_store(struct device *device,
3295 			      struct device_attribute *attr,
3296 			      const char *buf,
3297 			      size_t count)
3298 {
3299 	int ret;
3300 	int s3_state;
3301 	struct drm_device *drm_dev = dev_get_drvdata(device);
3302 	struct amdgpu_device *adev = drm_dev->dev_private;
3303 
3304 	ret = kstrtoint(buf, 0, &s3_state);
3305 
3306 	if (ret == 0) {
3307 		if (s3_state) {
3308 			dm_resume(adev);
3309 			drm_kms_helper_hotplug_event(adev->ddev);
3310 		} else
3311 			dm_suspend(adev);
3312 	}
3313 
3314 	return ret == 0 ? count : 0;
3315 }
3316 
3317 DEVICE_ATTR_WO(s3_debug);
3318 
3319 #endif
3320 
3321 static int dm_early_init(void *handle)
3322 {
3323 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3324 
3325 	switch (adev->asic_type) {
3326 	case CHIP_BONAIRE:
3327 	case CHIP_HAWAII:
3328 		adev->mode_info.num_crtc = 6;
3329 		adev->mode_info.num_hpd = 6;
3330 		adev->mode_info.num_dig = 6;
3331 		break;
3332 	case CHIP_KAVERI:
3333 		adev->mode_info.num_crtc = 4;
3334 		adev->mode_info.num_hpd = 6;
3335 		adev->mode_info.num_dig = 7;
3336 		break;
3337 	case CHIP_KABINI:
3338 	case CHIP_MULLINS:
3339 		adev->mode_info.num_crtc = 2;
3340 		adev->mode_info.num_hpd = 6;
3341 		adev->mode_info.num_dig = 6;
3342 		break;
3343 	case CHIP_FIJI:
3344 	case CHIP_TONGA:
3345 		adev->mode_info.num_crtc = 6;
3346 		adev->mode_info.num_hpd = 6;
3347 		adev->mode_info.num_dig = 7;
3348 		break;
3349 	case CHIP_CARRIZO:
3350 		adev->mode_info.num_crtc = 3;
3351 		adev->mode_info.num_hpd = 6;
3352 		adev->mode_info.num_dig = 9;
3353 		break;
3354 	case CHIP_STONEY:
3355 		adev->mode_info.num_crtc = 2;
3356 		adev->mode_info.num_hpd = 6;
3357 		adev->mode_info.num_dig = 9;
3358 		break;
3359 	case CHIP_POLARIS11:
3360 	case CHIP_POLARIS12:
3361 		adev->mode_info.num_crtc = 5;
3362 		adev->mode_info.num_hpd = 5;
3363 		adev->mode_info.num_dig = 5;
3364 		break;
3365 	case CHIP_POLARIS10:
3366 	case CHIP_VEGAM:
3367 		adev->mode_info.num_crtc = 6;
3368 		adev->mode_info.num_hpd = 6;
3369 		adev->mode_info.num_dig = 6;
3370 		break;
3371 	case CHIP_VEGA10:
3372 	case CHIP_VEGA12:
3373 	case CHIP_VEGA20:
3374 		adev->mode_info.num_crtc = 6;
3375 		adev->mode_info.num_hpd = 6;
3376 		adev->mode_info.num_dig = 6;
3377 		break;
3378 #if defined(CONFIG_DRM_AMD_DC_DCN)
3379 	case CHIP_RAVEN:
3380 		adev->mode_info.num_crtc = 4;
3381 		adev->mode_info.num_hpd = 4;
3382 		adev->mode_info.num_dig = 4;
3383 		break;
3384 #endif
3385 	case CHIP_NAVI10:
3386 	case CHIP_NAVI12:
3387 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3388 	case CHIP_SIENNA_CICHLID:
3389 	case CHIP_NAVY_FLOUNDER:
3390 #endif
3391 		adev->mode_info.num_crtc = 6;
3392 		adev->mode_info.num_hpd = 6;
3393 		adev->mode_info.num_dig = 6;
3394 		break;
3395 	case CHIP_NAVI14:
3396 		adev->mode_info.num_crtc = 5;
3397 		adev->mode_info.num_hpd = 5;
3398 		adev->mode_info.num_dig = 5;
3399 		break;
3400 	case CHIP_RENOIR:
3401 		adev->mode_info.num_crtc = 4;
3402 		adev->mode_info.num_hpd = 4;
3403 		adev->mode_info.num_dig = 4;
3404 		break;
3405 	default:
3406 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3407 		return -EINVAL;
3408 	}
3409 
3410 	amdgpu_dm_set_irq_funcs(adev);
3411 
3412 	if (adev->mode_info.funcs == NULL)
3413 		adev->mode_info.funcs = &dm_display_funcs;
3414 
3415 	/*
3416 	 * Note: Do NOT change adev->audio_endpt_rreg and
3417 	 * adev->audio_endpt_wreg because they are initialised in
3418 	 * amdgpu_device_init()
3419 	 */
3420 #if defined(CONFIG_DEBUG_KERNEL_DC)
3421 	device_create_file(
3422 		adev->ddev->dev,
3423 		&dev_attr_s3_debug);
3424 #endif
3425 
3426 	return 0;
3427 }
3428 
3429 static bool modeset_required(struct drm_crtc_state *crtc_state,
3430 			     struct dc_stream_state *new_stream,
3431 			     struct dc_stream_state *old_stream)
3432 {
3433 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3434 		return false;
3435 
3436 	if (!crtc_state->enable)
3437 		return false;
3438 
3439 	return crtc_state->active;
3440 }
3441 
3442 static bool modereset_required(struct drm_crtc_state *crtc_state)
3443 {
3444 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3445 		return false;
3446 
3447 	return !crtc_state->enable || !crtc_state->active;
3448 }
3449 
3450 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3451 {
3452 	drm_encoder_cleanup(encoder);
3453 	kfree(encoder);
3454 }
3455 
3456 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3457 	.destroy = amdgpu_dm_encoder_destroy,
3458 };
3459 
3460 
3461 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3462 				struct dc_scaling_info *scaling_info)
3463 {
3464 	int scale_w, scale_h;
3465 
3466 	memset(scaling_info, 0, sizeof(*scaling_info));
3467 
3468 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3469 	scaling_info->src_rect.x = state->src_x >> 16;
3470 	scaling_info->src_rect.y = state->src_y >> 16;
3471 
3472 	scaling_info->src_rect.width = state->src_w >> 16;
3473 	if (scaling_info->src_rect.width == 0)
3474 		return -EINVAL;
3475 
3476 	scaling_info->src_rect.height = state->src_h >> 16;
3477 	if (scaling_info->src_rect.height == 0)
3478 		return -EINVAL;
3479 
3480 	scaling_info->dst_rect.x = state->crtc_x;
3481 	scaling_info->dst_rect.y = state->crtc_y;
3482 
3483 	if (state->crtc_w == 0)
3484 		return -EINVAL;
3485 
3486 	scaling_info->dst_rect.width = state->crtc_w;
3487 
3488 	if (state->crtc_h == 0)
3489 		return -EINVAL;
3490 
3491 	scaling_info->dst_rect.height = state->crtc_h;
3492 
3493 	/* DRM doesn't specify clipping on destination output. */
3494 	scaling_info->clip_rect = scaling_info->dst_rect;
3495 
3496 	/* TODO: Validate scaling per-format with DC plane caps */
3497 	scale_w = scaling_info->dst_rect.width * 1000 /
3498 		  scaling_info->src_rect.width;
3499 
3500 	if (scale_w < 250 || scale_w > 16000)
3501 		return -EINVAL;
3502 
3503 	scale_h = scaling_info->dst_rect.height * 1000 /
3504 		  scaling_info->src_rect.height;
3505 
3506 	if (scale_h < 250 || scale_h > 16000)
3507 		return -EINVAL;
3508 
3509 	/*
3510 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3511 	 * assume reasonable defaults based on the format.
3512 	 */
3513 
3514 	return 0;
3515 }
3516 
3517 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3518 		       uint64_t *tiling_flags, bool *tmz_surface)
3519 {
3520 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3521 	int r = amdgpu_bo_reserve(rbo, false);
3522 
3523 	if (unlikely(r)) {
3524 		/* Don't show error message when returning -ERESTARTSYS */
3525 		if (r != -ERESTARTSYS)
3526 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3527 		return r;
3528 	}
3529 
3530 	if (tiling_flags)
3531 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3532 
3533 	if (tmz_surface)
3534 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3535 
3536 	amdgpu_bo_unreserve(rbo);
3537 
3538 	return r;
3539 }
3540 
3541 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3542 {
3543 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3544 
3545 	return offset ? (address + offset * 256) : 0;
3546 }
3547 
3548 static int
3549 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3550 			  const struct amdgpu_framebuffer *afb,
3551 			  const enum surface_pixel_format format,
3552 			  const enum dc_rotation_angle rotation,
3553 			  const struct plane_size *plane_size,
3554 			  const union dc_tiling_info *tiling_info,
3555 			  const uint64_t info,
3556 			  struct dc_plane_dcc_param *dcc,
3557 			  struct dc_plane_address *address,
3558 			  bool force_disable_dcc)
3559 {
3560 	struct dc *dc = adev->dm.dc;
3561 	struct dc_dcc_surface_param input;
3562 	struct dc_surface_dcc_cap output;
3563 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3564 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3565 	uint64_t dcc_address;
3566 
3567 	memset(&input, 0, sizeof(input));
3568 	memset(&output, 0, sizeof(output));
3569 
3570 	if (force_disable_dcc)
3571 		return 0;
3572 
3573 	if (!offset)
3574 		return 0;
3575 
3576 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3577 		return 0;
3578 
3579 	if (!dc->cap_funcs.get_dcc_compression_cap)
3580 		return -EINVAL;
3581 
3582 	input.format = format;
3583 	input.surface_size.width = plane_size->surface_size.width;
3584 	input.surface_size.height = plane_size->surface_size.height;
3585 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3586 
3587 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3588 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3589 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3590 		input.scan = SCAN_DIRECTION_VERTICAL;
3591 
3592 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3593 		return -EINVAL;
3594 
3595 	if (!output.capable)
3596 		return -EINVAL;
3597 
3598 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3599 		return -EINVAL;
3600 
3601 	dcc->enable = 1;
3602 	dcc->meta_pitch =
3603 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3604 	dcc->independent_64b_blks = i64b;
3605 
3606 	dcc_address = get_dcc_address(afb->address, info);
3607 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3608 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3609 
3610 	return 0;
3611 }
3612 
3613 static int
3614 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3615 			     const struct amdgpu_framebuffer *afb,
3616 			     const enum surface_pixel_format format,
3617 			     const enum dc_rotation_angle rotation,
3618 			     const uint64_t tiling_flags,
3619 			     union dc_tiling_info *tiling_info,
3620 			     struct plane_size *plane_size,
3621 			     struct dc_plane_dcc_param *dcc,
3622 			     struct dc_plane_address *address,
3623 			     bool tmz_surface,
3624 			     bool force_disable_dcc)
3625 {
3626 	const struct drm_framebuffer *fb = &afb->base;
3627 	int ret;
3628 
3629 	memset(tiling_info, 0, sizeof(*tiling_info));
3630 	memset(plane_size, 0, sizeof(*plane_size));
3631 	memset(dcc, 0, sizeof(*dcc));
3632 	memset(address, 0, sizeof(*address));
3633 
3634 	address->tmz_surface = tmz_surface;
3635 
3636 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3637 		plane_size->surface_size.x = 0;
3638 		plane_size->surface_size.y = 0;
3639 		plane_size->surface_size.width = fb->width;
3640 		plane_size->surface_size.height = fb->height;
3641 		plane_size->surface_pitch =
3642 			fb->pitches[0] / fb->format->cpp[0];
3643 
3644 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3645 		address->grph.addr.low_part = lower_32_bits(afb->address);
3646 		address->grph.addr.high_part = upper_32_bits(afb->address);
3647 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3648 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3649 
3650 		plane_size->surface_size.x = 0;
3651 		plane_size->surface_size.y = 0;
3652 		plane_size->surface_size.width = fb->width;
3653 		plane_size->surface_size.height = fb->height;
3654 		plane_size->surface_pitch =
3655 			fb->pitches[0] / fb->format->cpp[0];
3656 
3657 		plane_size->chroma_size.x = 0;
3658 		plane_size->chroma_size.y = 0;
3659 		/* TODO: set these based on surface format */
3660 		plane_size->chroma_size.width = fb->width / 2;
3661 		plane_size->chroma_size.height = fb->height / 2;
3662 
3663 		plane_size->chroma_pitch =
3664 			fb->pitches[1] / fb->format->cpp[1];
3665 
3666 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3667 		address->video_progressive.luma_addr.low_part =
3668 			lower_32_bits(afb->address);
3669 		address->video_progressive.luma_addr.high_part =
3670 			upper_32_bits(afb->address);
3671 		address->video_progressive.chroma_addr.low_part =
3672 			lower_32_bits(chroma_addr);
3673 		address->video_progressive.chroma_addr.high_part =
3674 			upper_32_bits(chroma_addr);
3675 	}
3676 
3677 	/* Fill GFX8 params */
3678 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3679 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3680 
3681 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3682 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3683 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3684 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3685 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3686 
3687 		/* XXX fix me for VI */
3688 		tiling_info->gfx8.num_banks = num_banks;
3689 		tiling_info->gfx8.array_mode =
3690 				DC_ARRAY_2D_TILED_THIN1;
3691 		tiling_info->gfx8.tile_split = tile_split;
3692 		tiling_info->gfx8.bank_width = bankw;
3693 		tiling_info->gfx8.bank_height = bankh;
3694 		tiling_info->gfx8.tile_aspect = mtaspect;
3695 		tiling_info->gfx8.tile_mode =
3696 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3697 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3698 			== DC_ARRAY_1D_TILED_THIN1) {
3699 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3700 	}
3701 
3702 	tiling_info->gfx8.pipe_config =
3703 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3704 
3705 	if (adev->asic_type == CHIP_VEGA10 ||
3706 	    adev->asic_type == CHIP_VEGA12 ||
3707 	    adev->asic_type == CHIP_VEGA20 ||
3708 	    adev->asic_type == CHIP_NAVI10 ||
3709 	    adev->asic_type == CHIP_NAVI14 ||
3710 	    adev->asic_type == CHIP_NAVI12 ||
3711 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3712 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3713 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3714 #endif
3715 	    adev->asic_type == CHIP_RENOIR ||
3716 	    adev->asic_type == CHIP_RAVEN) {
3717 		/* Fill GFX9 params */
3718 		tiling_info->gfx9.num_pipes =
3719 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3720 		tiling_info->gfx9.num_banks =
3721 			adev->gfx.config.gb_addr_config_fields.num_banks;
3722 		tiling_info->gfx9.pipe_interleave =
3723 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3724 		tiling_info->gfx9.num_shader_engines =
3725 			adev->gfx.config.gb_addr_config_fields.num_se;
3726 		tiling_info->gfx9.max_compressed_frags =
3727 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3728 		tiling_info->gfx9.num_rb_per_se =
3729 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3730 		tiling_info->gfx9.swizzle =
3731 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3732 		tiling_info->gfx9.shaderEnable = 1;
3733 
3734 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3735 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3736 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3737 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3738 #endif
3739 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3740 						plane_size, tiling_info,
3741 						tiling_flags, dcc, address,
3742 						force_disable_dcc);
3743 		if (ret)
3744 			return ret;
3745 	}
3746 
3747 	return 0;
3748 }
3749 
3750 static void
3751 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3752 			       bool *per_pixel_alpha, bool *global_alpha,
3753 			       int *global_alpha_value)
3754 {
3755 	*per_pixel_alpha = false;
3756 	*global_alpha = false;
3757 	*global_alpha_value = 0xff;
3758 
3759 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3760 		return;
3761 
3762 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3763 		static const uint32_t alpha_formats[] = {
3764 			DRM_FORMAT_ARGB8888,
3765 			DRM_FORMAT_RGBA8888,
3766 			DRM_FORMAT_ABGR8888,
3767 		};
3768 		uint32_t format = plane_state->fb->format->format;
3769 		unsigned int i;
3770 
3771 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3772 			if (format == alpha_formats[i]) {
3773 				*per_pixel_alpha = true;
3774 				break;
3775 			}
3776 		}
3777 	}
3778 
3779 	if (plane_state->alpha < 0xffff) {
3780 		*global_alpha = true;
3781 		*global_alpha_value = plane_state->alpha >> 8;
3782 	}
3783 }
3784 
3785 static int
3786 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3787 			    const enum surface_pixel_format format,
3788 			    enum dc_color_space *color_space)
3789 {
3790 	bool full_range;
3791 
3792 	*color_space = COLOR_SPACE_SRGB;
3793 
3794 	/* DRM color properties only affect non-RGB formats. */
3795 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3796 		return 0;
3797 
3798 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3799 
3800 	switch (plane_state->color_encoding) {
3801 	case DRM_COLOR_YCBCR_BT601:
3802 		if (full_range)
3803 			*color_space = COLOR_SPACE_YCBCR601;
3804 		else
3805 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3806 		break;
3807 
3808 	case DRM_COLOR_YCBCR_BT709:
3809 		if (full_range)
3810 			*color_space = COLOR_SPACE_YCBCR709;
3811 		else
3812 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3813 		break;
3814 
3815 	case DRM_COLOR_YCBCR_BT2020:
3816 		if (full_range)
3817 			*color_space = COLOR_SPACE_2020_YCBCR;
3818 		else
3819 			return -EINVAL;
3820 		break;
3821 
3822 	default:
3823 		return -EINVAL;
3824 	}
3825 
3826 	return 0;
3827 }
3828 
3829 static int
3830 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3831 			    const struct drm_plane_state *plane_state,
3832 			    const uint64_t tiling_flags,
3833 			    struct dc_plane_info *plane_info,
3834 			    struct dc_plane_address *address,
3835 			    bool tmz_surface,
3836 			    bool force_disable_dcc)
3837 {
3838 	const struct drm_framebuffer *fb = plane_state->fb;
3839 	const struct amdgpu_framebuffer *afb =
3840 		to_amdgpu_framebuffer(plane_state->fb);
3841 	struct drm_format_name_buf format_name;
3842 	int ret;
3843 
3844 	memset(plane_info, 0, sizeof(*plane_info));
3845 
3846 	switch (fb->format->format) {
3847 	case DRM_FORMAT_C8:
3848 		plane_info->format =
3849 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3850 		break;
3851 	case DRM_FORMAT_RGB565:
3852 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3853 		break;
3854 	case DRM_FORMAT_XRGB8888:
3855 	case DRM_FORMAT_ARGB8888:
3856 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3857 		break;
3858 	case DRM_FORMAT_XRGB2101010:
3859 	case DRM_FORMAT_ARGB2101010:
3860 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3861 		break;
3862 	case DRM_FORMAT_XBGR2101010:
3863 	case DRM_FORMAT_ABGR2101010:
3864 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3865 		break;
3866 	case DRM_FORMAT_XBGR8888:
3867 	case DRM_FORMAT_ABGR8888:
3868 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3869 		break;
3870 	case DRM_FORMAT_NV21:
3871 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3872 		break;
3873 	case DRM_FORMAT_NV12:
3874 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3875 		break;
3876 	case DRM_FORMAT_P010:
3877 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3878 		break;
3879 	case DRM_FORMAT_XRGB16161616F:
3880 	case DRM_FORMAT_ARGB16161616F:
3881 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3882 		break;
3883 	case DRM_FORMAT_XBGR16161616F:
3884 	case DRM_FORMAT_ABGR16161616F:
3885 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3886 		break;
3887 	default:
3888 		DRM_ERROR(
3889 			"Unsupported screen format %s\n",
3890 			drm_get_format_name(fb->format->format, &format_name));
3891 		return -EINVAL;
3892 	}
3893 
3894 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3895 	case DRM_MODE_ROTATE_0:
3896 		plane_info->rotation = ROTATION_ANGLE_0;
3897 		break;
3898 	case DRM_MODE_ROTATE_90:
3899 		plane_info->rotation = ROTATION_ANGLE_90;
3900 		break;
3901 	case DRM_MODE_ROTATE_180:
3902 		plane_info->rotation = ROTATION_ANGLE_180;
3903 		break;
3904 	case DRM_MODE_ROTATE_270:
3905 		plane_info->rotation = ROTATION_ANGLE_270;
3906 		break;
3907 	default:
3908 		plane_info->rotation = ROTATION_ANGLE_0;
3909 		break;
3910 	}
3911 
3912 	plane_info->visible = true;
3913 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3914 
3915 	plane_info->layer_index = 0;
3916 
3917 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3918 					  &plane_info->color_space);
3919 	if (ret)
3920 		return ret;
3921 
3922 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3923 					   plane_info->rotation, tiling_flags,
3924 					   &plane_info->tiling_info,
3925 					   &plane_info->plane_size,
3926 					   &plane_info->dcc, address, tmz_surface,
3927 					   force_disable_dcc);
3928 	if (ret)
3929 		return ret;
3930 
3931 	fill_blending_from_plane_state(
3932 		plane_state, &plane_info->per_pixel_alpha,
3933 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3934 
3935 	return 0;
3936 }
3937 
3938 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3939 				    struct dc_plane_state *dc_plane_state,
3940 				    struct drm_plane_state *plane_state,
3941 				    struct drm_crtc_state *crtc_state)
3942 {
3943 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3944 	const struct amdgpu_framebuffer *amdgpu_fb =
3945 		to_amdgpu_framebuffer(plane_state->fb);
3946 	struct dc_scaling_info scaling_info;
3947 	struct dc_plane_info plane_info;
3948 	uint64_t tiling_flags;
3949 	int ret;
3950 	bool tmz_surface = false;
3951 	bool force_disable_dcc = false;
3952 
3953 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3954 	if (ret)
3955 		return ret;
3956 
3957 	dc_plane_state->src_rect = scaling_info.src_rect;
3958 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3959 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3960 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3961 
3962 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3963 	if (ret)
3964 		return ret;
3965 
3966 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3967 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3968 					  &plane_info,
3969 					  &dc_plane_state->address,
3970 					  tmz_surface,
3971 					  force_disable_dcc);
3972 	if (ret)
3973 		return ret;
3974 
3975 	dc_plane_state->format = plane_info.format;
3976 	dc_plane_state->color_space = plane_info.color_space;
3977 	dc_plane_state->format = plane_info.format;
3978 	dc_plane_state->plane_size = plane_info.plane_size;
3979 	dc_plane_state->rotation = plane_info.rotation;
3980 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3981 	dc_plane_state->stereo_format = plane_info.stereo_format;
3982 	dc_plane_state->tiling_info = plane_info.tiling_info;
3983 	dc_plane_state->visible = plane_info.visible;
3984 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3985 	dc_plane_state->global_alpha = plane_info.global_alpha;
3986 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3987 	dc_plane_state->dcc = plane_info.dcc;
3988 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3989 
3990 	/*
3991 	 * Always set input transfer function, since plane state is refreshed
3992 	 * every time.
3993 	 */
3994 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3995 	if (ret)
3996 		return ret;
3997 
3998 	return 0;
3999 }
4000 
4001 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4002 					   const struct dm_connector_state *dm_state,
4003 					   struct dc_stream_state *stream)
4004 {
4005 	enum amdgpu_rmx_type rmx_type;
4006 
4007 	struct rect src = { 0 }; /* viewport in composition space*/
4008 	struct rect dst = { 0 }; /* stream addressable area */
4009 
4010 	/* no mode. nothing to be done */
4011 	if (!mode)
4012 		return;
4013 
4014 	/* Full screen scaling by default */
4015 	src.width = mode->hdisplay;
4016 	src.height = mode->vdisplay;
4017 	dst.width = stream->timing.h_addressable;
4018 	dst.height = stream->timing.v_addressable;
4019 
4020 	if (dm_state) {
4021 		rmx_type = dm_state->scaling;
4022 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4023 			if (src.width * dst.height <
4024 					src.height * dst.width) {
4025 				/* height needs less upscaling/more downscaling */
4026 				dst.width = src.width *
4027 						dst.height / src.height;
4028 			} else {
4029 				/* width needs less upscaling/more downscaling */
4030 				dst.height = src.height *
4031 						dst.width / src.width;
4032 			}
4033 		} else if (rmx_type == RMX_CENTER) {
4034 			dst = src;
4035 		}
4036 
4037 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4038 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4039 
4040 		if (dm_state->underscan_enable) {
4041 			dst.x += dm_state->underscan_hborder / 2;
4042 			dst.y += dm_state->underscan_vborder / 2;
4043 			dst.width -= dm_state->underscan_hborder;
4044 			dst.height -= dm_state->underscan_vborder;
4045 		}
4046 	}
4047 
4048 	stream->src = src;
4049 	stream->dst = dst;
4050 
4051 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4052 			dst.x, dst.y, dst.width, dst.height);
4053 
4054 }
4055 
4056 static enum dc_color_depth
4057 convert_color_depth_from_display_info(const struct drm_connector *connector,
4058 				      bool is_y420, int requested_bpc)
4059 {
4060 	uint8_t bpc;
4061 
4062 	if (is_y420) {
4063 		bpc = 8;
4064 
4065 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4066 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4067 			bpc = 16;
4068 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4069 			bpc = 12;
4070 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4071 			bpc = 10;
4072 	} else {
4073 		bpc = (uint8_t)connector->display_info.bpc;
4074 		/* Assume 8 bpc by default if no bpc is specified. */
4075 		bpc = bpc ? bpc : 8;
4076 	}
4077 
4078 	if (requested_bpc > 0) {
4079 		/*
4080 		 * Cap display bpc based on the user requested value.
4081 		 *
4082 		 * The value for state->max_bpc may not correctly updated
4083 		 * depending on when the connector gets added to the state
4084 		 * or if this was called outside of atomic check, so it
4085 		 * can't be used directly.
4086 		 */
4087 		bpc = min_t(u8, bpc, requested_bpc);
4088 
4089 		/* Round down to the nearest even number. */
4090 		bpc = bpc - (bpc & 1);
4091 	}
4092 
4093 	switch (bpc) {
4094 	case 0:
4095 		/*
4096 		 * Temporary Work around, DRM doesn't parse color depth for
4097 		 * EDID revision before 1.4
4098 		 * TODO: Fix edid parsing
4099 		 */
4100 		return COLOR_DEPTH_888;
4101 	case 6:
4102 		return COLOR_DEPTH_666;
4103 	case 8:
4104 		return COLOR_DEPTH_888;
4105 	case 10:
4106 		return COLOR_DEPTH_101010;
4107 	case 12:
4108 		return COLOR_DEPTH_121212;
4109 	case 14:
4110 		return COLOR_DEPTH_141414;
4111 	case 16:
4112 		return COLOR_DEPTH_161616;
4113 	default:
4114 		return COLOR_DEPTH_UNDEFINED;
4115 	}
4116 }
4117 
4118 static enum dc_aspect_ratio
4119 get_aspect_ratio(const struct drm_display_mode *mode_in)
4120 {
4121 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4122 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4123 }
4124 
4125 static enum dc_color_space
4126 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4127 {
4128 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4129 
4130 	switch (dc_crtc_timing->pixel_encoding)	{
4131 	case PIXEL_ENCODING_YCBCR422:
4132 	case PIXEL_ENCODING_YCBCR444:
4133 	case PIXEL_ENCODING_YCBCR420:
4134 	{
4135 		/*
4136 		 * 27030khz is the separation point between HDTV and SDTV
4137 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4138 		 * respectively
4139 		 */
4140 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4141 			if (dc_crtc_timing->flags.Y_ONLY)
4142 				color_space =
4143 					COLOR_SPACE_YCBCR709_LIMITED;
4144 			else
4145 				color_space = COLOR_SPACE_YCBCR709;
4146 		} else {
4147 			if (dc_crtc_timing->flags.Y_ONLY)
4148 				color_space =
4149 					COLOR_SPACE_YCBCR601_LIMITED;
4150 			else
4151 				color_space = COLOR_SPACE_YCBCR601;
4152 		}
4153 
4154 	}
4155 	break;
4156 	case PIXEL_ENCODING_RGB:
4157 		color_space = COLOR_SPACE_SRGB;
4158 		break;
4159 
4160 	default:
4161 		WARN_ON(1);
4162 		break;
4163 	}
4164 
4165 	return color_space;
4166 }
4167 
4168 static bool adjust_colour_depth_from_display_info(
4169 	struct dc_crtc_timing *timing_out,
4170 	const struct drm_display_info *info)
4171 {
4172 	enum dc_color_depth depth = timing_out->display_color_depth;
4173 	int normalized_clk;
4174 	do {
4175 		normalized_clk = timing_out->pix_clk_100hz / 10;
4176 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4177 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4178 			normalized_clk /= 2;
4179 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4180 		switch (depth) {
4181 		case COLOR_DEPTH_888:
4182 			break;
4183 		case COLOR_DEPTH_101010:
4184 			normalized_clk = (normalized_clk * 30) / 24;
4185 			break;
4186 		case COLOR_DEPTH_121212:
4187 			normalized_clk = (normalized_clk * 36) / 24;
4188 			break;
4189 		case COLOR_DEPTH_161616:
4190 			normalized_clk = (normalized_clk * 48) / 24;
4191 			break;
4192 		default:
4193 			/* The above depths are the only ones valid for HDMI. */
4194 			return false;
4195 		}
4196 		if (normalized_clk <= info->max_tmds_clock) {
4197 			timing_out->display_color_depth = depth;
4198 			return true;
4199 		}
4200 	} while (--depth > COLOR_DEPTH_666);
4201 	return false;
4202 }
4203 
4204 static void fill_stream_properties_from_drm_display_mode(
4205 	struct dc_stream_state *stream,
4206 	const struct drm_display_mode *mode_in,
4207 	const struct drm_connector *connector,
4208 	const struct drm_connector_state *connector_state,
4209 	const struct dc_stream_state *old_stream,
4210 	int requested_bpc)
4211 {
4212 	struct dc_crtc_timing *timing_out = &stream->timing;
4213 	const struct drm_display_info *info = &connector->display_info;
4214 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4215 	struct hdmi_vendor_infoframe hv_frame;
4216 	struct hdmi_avi_infoframe avi_frame;
4217 
4218 	memset(&hv_frame, 0, sizeof(hv_frame));
4219 	memset(&avi_frame, 0, sizeof(avi_frame));
4220 
4221 	timing_out->h_border_left = 0;
4222 	timing_out->h_border_right = 0;
4223 	timing_out->v_border_top = 0;
4224 	timing_out->v_border_bottom = 0;
4225 	/* TODO: un-hardcode */
4226 	if (drm_mode_is_420_only(info, mode_in)
4227 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4228 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4229 	else if (drm_mode_is_420_also(info, mode_in)
4230 			&& aconnector->force_yuv420_output)
4231 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4232 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4233 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4234 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4235 	else
4236 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4237 
4238 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4239 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4240 		connector,
4241 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4242 		requested_bpc);
4243 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4244 	timing_out->hdmi_vic = 0;
4245 
4246 	if(old_stream) {
4247 		timing_out->vic = old_stream->timing.vic;
4248 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4249 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4250 	} else {
4251 		timing_out->vic = drm_match_cea_mode(mode_in);
4252 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4253 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4254 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4255 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4256 	}
4257 
4258 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4259 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4260 		timing_out->vic = avi_frame.video_code;
4261 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4262 		timing_out->hdmi_vic = hv_frame.vic;
4263 	}
4264 
4265 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4266 	timing_out->h_total = mode_in->crtc_htotal;
4267 	timing_out->h_sync_width =
4268 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4269 	timing_out->h_front_porch =
4270 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4271 	timing_out->v_total = mode_in->crtc_vtotal;
4272 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4273 	timing_out->v_front_porch =
4274 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4275 	timing_out->v_sync_width =
4276 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4277 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4278 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4279 
4280 	stream->output_color_space = get_output_color_space(timing_out);
4281 
4282 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4283 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4284 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4285 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4286 		    drm_mode_is_420_also(info, mode_in) &&
4287 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4288 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4289 			adjust_colour_depth_from_display_info(timing_out, info);
4290 		}
4291 	}
4292 }
4293 
4294 static void fill_audio_info(struct audio_info *audio_info,
4295 			    const struct drm_connector *drm_connector,
4296 			    const struct dc_sink *dc_sink)
4297 {
4298 	int i = 0;
4299 	int cea_revision = 0;
4300 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4301 
4302 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4303 	audio_info->product_id = edid_caps->product_id;
4304 
4305 	cea_revision = drm_connector->display_info.cea_rev;
4306 
4307 	strscpy(audio_info->display_name,
4308 		edid_caps->display_name,
4309 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4310 
4311 	if (cea_revision >= 3) {
4312 		audio_info->mode_count = edid_caps->audio_mode_count;
4313 
4314 		for (i = 0; i < audio_info->mode_count; ++i) {
4315 			audio_info->modes[i].format_code =
4316 					(enum audio_format_code)
4317 					(edid_caps->audio_modes[i].format_code);
4318 			audio_info->modes[i].channel_count =
4319 					edid_caps->audio_modes[i].channel_count;
4320 			audio_info->modes[i].sample_rates.all =
4321 					edid_caps->audio_modes[i].sample_rate;
4322 			audio_info->modes[i].sample_size =
4323 					edid_caps->audio_modes[i].sample_size;
4324 		}
4325 	}
4326 
4327 	audio_info->flags.all = edid_caps->speaker_flags;
4328 
4329 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4330 	if (drm_connector->latency_present[0]) {
4331 		audio_info->video_latency = drm_connector->video_latency[0];
4332 		audio_info->audio_latency = drm_connector->audio_latency[0];
4333 	}
4334 
4335 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4336 
4337 }
4338 
4339 static void
4340 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4341 				      struct drm_display_mode *dst_mode)
4342 {
4343 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4344 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4345 	dst_mode->crtc_clock = src_mode->crtc_clock;
4346 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4347 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4348 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4349 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4350 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4351 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4352 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4353 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4354 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4355 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4356 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4357 }
4358 
4359 static void
4360 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4361 					const struct drm_display_mode *native_mode,
4362 					bool scale_enabled)
4363 {
4364 	if (scale_enabled) {
4365 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4366 	} else if (native_mode->clock == drm_mode->clock &&
4367 			native_mode->htotal == drm_mode->htotal &&
4368 			native_mode->vtotal == drm_mode->vtotal) {
4369 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4370 	} else {
4371 		/* no scaling nor amdgpu inserted, no need to patch */
4372 	}
4373 }
4374 
4375 static struct dc_sink *
4376 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4377 {
4378 	struct dc_sink_init_data sink_init_data = { 0 };
4379 	struct dc_sink *sink = NULL;
4380 	sink_init_data.link = aconnector->dc_link;
4381 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4382 
4383 	sink = dc_sink_create(&sink_init_data);
4384 	if (!sink) {
4385 		DRM_ERROR("Failed to create sink!\n");
4386 		return NULL;
4387 	}
4388 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4389 
4390 	return sink;
4391 }
4392 
4393 static void set_multisync_trigger_params(
4394 		struct dc_stream_state *stream)
4395 {
4396 	if (stream->triggered_crtc_reset.enabled) {
4397 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4398 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4399 	}
4400 }
4401 
4402 static void set_master_stream(struct dc_stream_state *stream_set[],
4403 			      int stream_count)
4404 {
4405 	int j, highest_rfr = 0, master_stream = 0;
4406 
4407 	for (j = 0;  j < stream_count; j++) {
4408 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4409 			int refresh_rate = 0;
4410 
4411 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4412 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4413 			if (refresh_rate > highest_rfr) {
4414 				highest_rfr = refresh_rate;
4415 				master_stream = j;
4416 			}
4417 		}
4418 	}
4419 	for (j = 0;  j < stream_count; j++) {
4420 		if (stream_set[j])
4421 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4422 	}
4423 }
4424 
4425 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4426 {
4427 	int i = 0;
4428 
4429 	if (context->stream_count < 2)
4430 		return;
4431 	for (i = 0; i < context->stream_count ; i++) {
4432 		if (!context->streams[i])
4433 			continue;
4434 		/*
4435 		 * TODO: add a function to read AMD VSDB bits and set
4436 		 * crtc_sync_master.multi_sync_enabled flag
4437 		 * For now it's set to false
4438 		 */
4439 		set_multisync_trigger_params(context->streams[i]);
4440 	}
4441 	set_master_stream(context->streams, context->stream_count);
4442 }
4443 
4444 static struct dc_stream_state *
4445 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4446 		       const struct drm_display_mode *drm_mode,
4447 		       const struct dm_connector_state *dm_state,
4448 		       const struct dc_stream_state *old_stream,
4449 		       int requested_bpc)
4450 {
4451 	struct drm_display_mode *preferred_mode = NULL;
4452 	struct drm_connector *drm_connector;
4453 	const struct drm_connector_state *con_state =
4454 		dm_state ? &dm_state->base : NULL;
4455 	struct dc_stream_state *stream = NULL;
4456 	struct drm_display_mode mode = *drm_mode;
4457 	bool native_mode_found = false;
4458 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4459 	int mode_refresh;
4460 	int preferred_refresh = 0;
4461 #if defined(CONFIG_DRM_AMD_DC_DCN)
4462 	struct dsc_dec_dpcd_caps dsc_caps;
4463 #endif
4464 	uint32_t link_bandwidth_kbps;
4465 
4466 	struct dc_sink *sink = NULL;
4467 	if (aconnector == NULL) {
4468 		DRM_ERROR("aconnector is NULL!\n");
4469 		return stream;
4470 	}
4471 
4472 	drm_connector = &aconnector->base;
4473 
4474 	if (!aconnector->dc_sink) {
4475 		sink = create_fake_sink(aconnector);
4476 		if (!sink)
4477 			return stream;
4478 	} else {
4479 		sink = aconnector->dc_sink;
4480 		dc_sink_retain(sink);
4481 	}
4482 
4483 	stream = dc_create_stream_for_sink(sink);
4484 
4485 	if (stream == NULL) {
4486 		DRM_ERROR("Failed to create stream for sink!\n");
4487 		goto finish;
4488 	}
4489 
4490 	stream->dm_stream_context = aconnector;
4491 
4492 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4493 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4494 
4495 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4496 		/* Search for preferred mode */
4497 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4498 			native_mode_found = true;
4499 			break;
4500 		}
4501 	}
4502 	if (!native_mode_found)
4503 		preferred_mode = list_first_entry_or_null(
4504 				&aconnector->base.modes,
4505 				struct drm_display_mode,
4506 				head);
4507 
4508 	mode_refresh = drm_mode_vrefresh(&mode);
4509 
4510 	if (preferred_mode == NULL) {
4511 		/*
4512 		 * This may not be an error, the use case is when we have no
4513 		 * usermode calls to reset and set mode upon hotplug. In this
4514 		 * case, we call set mode ourselves to restore the previous mode
4515 		 * and the modelist may not be filled in in time.
4516 		 */
4517 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4518 	} else {
4519 		decide_crtc_timing_for_drm_display_mode(
4520 				&mode, preferred_mode,
4521 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4522 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4523 	}
4524 
4525 	if (!dm_state)
4526 		drm_mode_set_crtcinfo(&mode, 0);
4527 
4528 	/*
4529 	* If scaling is enabled and refresh rate didn't change
4530 	* we copy the vic and polarities of the old timings
4531 	*/
4532 	if (!scale || mode_refresh != preferred_refresh)
4533 		fill_stream_properties_from_drm_display_mode(stream,
4534 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4535 	else
4536 		fill_stream_properties_from_drm_display_mode(stream,
4537 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4538 
4539 	stream->timing.flags.DSC = 0;
4540 
4541 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4542 #if defined(CONFIG_DRM_AMD_DC_DCN)
4543 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4544 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4545 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4546 				      &dsc_caps);
4547 #endif
4548 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4549 							     dc_link_get_link_cap(aconnector->dc_link));
4550 
4551 #if defined(CONFIG_DRM_AMD_DC_DCN)
4552 		if (dsc_caps.is_dsc_supported)
4553 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4554 						  &dsc_caps,
4555 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4556 						  link_bandwidth_kbps,
4557 						  &stream->timing,
4558 						  &stream->timing.dsc_cfg))
4559 				stream->timing.flags.DSC = 1;
4560 #endif
4561 	}
4562 
4563 	update_stream_scaling_settings(&mode, dm_state, stream);
4564 
4565 	fill_audio_info(
4566 		&stream->audio_info,
4567 		drm_connector,
4568 		sink);
4569 
4570 	update_stream_signal(stream, sink);
4571 
4572 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4573 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4574 	if (stream->link->psr_settings.psr_feature_enabled) {
4575 		//
4576 		// should decide stream support vsc sdp colorimetry capability
4577 		// before building vsc info packet
4578 		//
4579 		stream->use_vsc_sdp_for_colorimetry = false;
4580 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4581 			stream->use_vsc_sdp_for_colorimetry =
4582 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4583 		} else {
4584 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4585 				stream->use_vsc_sdp_for_colorimetry = true;
4586 		}
4587 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4588 	}
4589 finish:
4590 	dc_sink_release(sink);
4591 
4592 	return stream;
4593 }
4594 
4595 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4596 {
4597 	drm_crtc_cleanup(crtc);
4598 	kfree(crtc);
4599 }
4600 
4601 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4602 				  struct drm_crtc_state *state)
4603 {
4604 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4605 
4606 	/* TODO Destroy dc_stream objects are stream object is flattened */
4607 	if (cur->stream)
4608 		dc_stream_release(cur->stream);
4609 
4610 
4611 	__drm_atomic_helper_crtc_destroy_state(state);
4612 
4613 
4614 	kfree(state);
4615 }
4616 
4617 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4618 {
4619 	struct dm_crtc_state *state;
4620 
4621 	if (crtc->state)
4622 		dm_crtc_destroy_state(crtc, crtc->state);
4623 
4624 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4625 	if (WARN_ON(!state))
4626 		return;
4627 
4628 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4629 }
4630 
4631 static struct drm_crtc_state *
4632 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4633 {
4634 	struct dm_crtc_state *state, *cur;
4635 
4636 	cur = to_dm_crtc_state(crtc->state);
4637 
4638 	if (WARN_ON(!crtc->state))
4639 		return NULL;
4640 
4641 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4642 	if (!state)
4643 		return NULL;
4644 
4645 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4646 
4647 	if (cur->stream) {
4648 		state->stream = cur->stream;
4649 		dc_stream_retain(state->stream);
4650 	}
4651 
4652 	state->active_planes = cur->active_planes;
4653 	state->vrr_params = cur->vrr_params;
4654 	state->vrr_infopacket = cur->vrr_infopacket;
4655 	state->abm_level = cur->abm_level;
4656 	state->vrr_supported = cur->vrr_supported;
4657 	state->freesync_config = cur->freesync_config;
4658 	state->crc_src = cur->crc_src;
4659 	state->cm_has_degamma = cur->cm_has_degamma;
4660 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4661 
4662 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4663 
4664 	return &state->base;
4665 }
4666 
4667 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4668 {
4669 	enum dc_irq_source irq_source;
4670 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4671 	struct amdgpu_device *adev = crtc->dev->dev_private;
4672 	int rc;
4673 
4674 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4675 
4676 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4677 
4678 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4679 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4680 	return rc;
4681 }
4682 
4683 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4684 {
4685 	enum dc_irq_source irq_source;
4686 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4687 	struct amdgpu_device *adev = crtc->dev->dev_private;
4688 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4689 	int rc = 0;
4690 
4691 	if (enable) {
4692 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4693 		if (amdgpu_dm_vrr_active(acrtc_state))
4694 			rc = dm_set_vupdate_irq(crtc, true);
4695 	} else {
4696 		/* vblank irq off -> vupdate irq off */
4697 		rc = dm_set_vupdate_irq(crtc, false);
4698 	}
4699 
4700 	if (rc)
4701 		return rc;
4702 
4703 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4704 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4705 }
4706 
4707 static int dm_enable_vblank(struct drm_crtc *crtc)
4708 {
4709 	return dm_set_vblank(crtc, true);
4710 }
4711 
4712 static void dm_disable_vblank(struct drm_crtc *crtc)
4713 {
4714 	dm_set_vblank(crtc, false);
4715 }
4716 
4717 /* Implemented only the options currently availible for the driver */
4718 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4719 	.reset = dm_crtc_reset_state,
4720 	.destroy = amdgpu_dm_crtc_destroy,
4721 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4722 	.set_config = drm_atomic_helper_set_config,
4723 	.page_flip = drm_atomic_helper_page_flip,
4724 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4725 	.atomic_destroy_state = dm_crtc_destroy_state,
4726 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4727 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4728 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4729 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4730 	.enable_vblank = dm_enable_vblank,
4731 	.disable_vblank = dm_disable_vblank,
4732 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4733 };
4734 
4735 static enum drm_connector_status
4736 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4737 {
4738 	bool connected;
4739 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4740 
4741 	/*
4742 	 * Notes:
4743 	 * 1. This interface is NOT called in context of HPD irq.
4744 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4745 	 * makes it a bad place for *any* MST-related activity.
4746 	 */
4747 
4748 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4749 	    !aconnector->fake_enable)
4750 		connected = (aconnector->dc_sink != NULL);
4751 	else
4752 		connected = (aconnector->base.force == DRM_FORCE_ON);
4753 
4754 	return (connected ? connector_status_connected :
4755 			connector_status_disconnected);
4756 }
4757 
4758 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4759 					    struct drm_connector_state *connector_state,
4760 					    struct drm_property *property,
4761 					    uint64_t val)
4762 {
4763 	struct drm_device *dev = connector->dev;
4764 	struct amdgpu_device *adev = dev->dev_private;
4765 	struct dm_connector_state *dm_old_state =
4766 		to_dm_connector_state(connector->state);
4767 	struct dm_connector_state *dm_new_state =
4768 		to_dm_connector_state(connector_state);
4769 
4770 	int ret = -EINVAL;
4771 
4772 	if (property == dev->mode_config.scaling_mode_property) {
4773 		enum amdgpu_rmx_type rmx_type;
4774 
4775 		switch (val) {
4776 		case DRM_MODE_SCALE_CENTER:
4777 			rmx_type = RMX_CENTER;
4778 			break;
4779 		case DRM_MODE_SCALE_ASPECT:
4780 			rmx_type = RMX_ASPECT;
4781 			break;
4782 		case DRM_MODE_SCALE_FULLSCREEN:
4783 			rmx_type = RMX_FULL;
4784 			break;
4785 		case DRM_MODE_SCALE_NONE:
4786 		default:
4787 			rmx_type = RMX_OFF;
4788 			break;
4789 		}
4790 
4791 		if (dm_old_state->scaling == rmx_type)
4792 			return 0;
4793 
4794 		dm_new_state->scaling = rmx_type;
4795 		ret = 0;
4796 	} else if (property == adev->mode_info.underscan_hborder_property) {
4797 		dm_new_state->underscan_hborder = val;
4798 		ret = 0;
4799 	} else if (property == adev->mode_info.underscan_vborder_property) {
4800 		dm_new_state->underscan_vborder = val;
4801 		ret = 0;
4802 	} else if (property == adev->mode_info.underscan_property) {
4803 		dm_new_state->underscan_enable = val;
4804 		ret = 0;
4805 	} else if (property == adev->mode_info.abm_level_property) {
4806 		dm_new_state->abm_level = val;
4807 		ret = 0;
4808 	}
4809 
4810 	return ret;
4811 }
4812 
4813 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4814 					    const struct drm_connector_state *state,
4815 					    struct drm_property *property,
4816 					    uint64_t *val)
4817 {
4818 	struct drm_device *dev = connector->dev;
4819 	struct amdgpu_device *adev = dev->dev_private;
4820 	struct dm_connector_state *dm_state =
4821 		to_dm_connector_state(state);
4822 	int ret = -EINVAL;
4823 
4824 	if (property == dev->mode_config.scaling_mode_property) {
4825 		switch (dm_state->scaling) {
4826 		case RMX_CENTER:
4827 			*val = DRM_MODE_SCALE_CENTER;
4828 			break;
4829 		case RMX_ASPECT:
4830 			*val = DRM_MODE_SCALE_ASPECT;
4831 			break;
4832 		case RMX_FULL:
4833 			*val = DRM_MODE_SCALE_FULLSCREEN;
4834 			break;
4835 		case RMX_OFF:
4836 		default:
4837 			*val = DRM_MODE_SCALE_NONE;
4838 			break;
4839 		}
4840 		ret = 0;
4841 	} else if (property == adev->mode_info.underscan_hborder_property) {
4842 		*val = dm_state->underscan_hborder;
4843 		ret = 0;
4844 	} else if (property == adev->mode_info.underscan_vborder_property) {
4845 		*val = dm_state->underscan_vborder;
4846 		ret = 0;
4847 	} else if (property == adev->mode_info.underscan_property) {
4848 		*val = dm_state->underscan_enable;
4849 		ret = 0;
4850 	} else if (property == adev->mode_info.abm_level_property) {
4851 		*val = dm_state->abm_level;
4852 		ret = 0;
4853 	}
4854 
4855 	return ret;
4856 }
4857 
4858 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4859 {
4860 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4861 
4862 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4863 }
4864 
4865 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4866 {
4867 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4868 	const struct dc_link *link = aconnector->dc_link;
4869 	struct amdgpu_device *adev = connector->dev->dev_private;
4870 	struct amdgpu_display_manager *dm = &adev->dm;
4871 
4872 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4873 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4874 
4875 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4876 	    link->type != dc_connection_none &&
4877 	    dm->backlight_dev) {
4878 		backlight_device_unregister(dm->backlight_dev);
4879 		dm->backlight_dev = NULL;
4880 	}
4881 #endif
4882 
4883 	if (aconnector->dc_em_sink)
4884 		dc_sink_release(aconnector->dc_em_sink);
4885 	aconnector->dc_em_sink = NULL;
4886 	if (aconnector->dc_sink)
4887 		dc_sink_release(aconnector->dc_sink);
4888 	aconnector->dc_sink = NULL;
4889 
4890 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4891 	drm_connector_unregister(connector);
4892 	drm_connector_cleanup(connector);
4893 	if (aconnector->i2c) {
4894 		i2c_del_adapter(&aconnector->i2c->base);
4895 		kfree(aconnector->i2c);
4896 	}
4897 	kfree(aconnector->dm_dp_aux.aux.name);
4898 
4899 	kfree(connector);
4900 }
4901 
4902 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4903 {
4904 	struct dm_connector_state *state =
4905 		to_dm_connector_state(connector->state);
4906 
4907 	if (connector->state)
4908 		__drm_atomic_helper_connector_destroy_state(connector->state);
4909 
4910 	kfree(state);
4911 
4912 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4913 
4914 	if (state) {
4915 		state->scaling = RMX_OFF;
4916 		state->underscan_enable = false;
4917 		state->underscan_hborder = 0;
4918 		state->underscan_vborder = 0;
4919 		state->base.max_requested_bpc = 8;
4920 		state->vcpi_slots = 0;
4921 		state->pbn = 0;
4922 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4923 			state->abm_level = amdgpu_dm_abm_level;
4924 
4925 		__drm_atomic_helper_connector_reset(connector, &state->base);
4926 	}
4927 }
4928 
4929 struct drm_connector_state *
4930 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4931 {
4932 	struct dm_connector_state *state =
4933 		to_dm_connector_state(connector->state);
4934 
4935 	struct dm_connector_state *new_state =
4936 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4937 
4938 	if (!new_state)
4939 		return NULL;
4940 
4941 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4942 
4943 	new_state->freesync_capable = state->freesync_capable;
4944 	new_state->abm_level = state->abm_level;
4945 	new_state->scaling = state->scaling;
4946 	new_state->underscan_enable = state->underscan_enable;
4947 	new_state->underscan_hborder = state->underscan_hborder;
4948 	new_state->underscan_vborder = state->underscan_vborder;
4949 	new_state->vcpi_slots = state->vcpi_slots;
4950 	new_state->pbn = state->pbn;
4951 	return &new_state->base;
4952 }
4953 
4954 static int
4955 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4956 {
4957 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4958 		to_amdgpu_dm_connector(connector);
4959 	int r;
4960 
4961 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4962 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4963 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4964 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4965 		if (r)
4966 			return r;
4967 	}
4968 
4969 #if defined(CONFIG_DEBUG_FS)
4970 	connector_debugfs_init(amdgpu_dm_connector);
4971 #endif
4972 
4973 	return 0;
4974 }
4975 
4976 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4977 	.reset = amdgpu_dm_connector_funcs_reset,
4978 	.detect = amdgpu_dm_connector_detect,
4979 	.fill_modes = drm_helper_probe_single_connector_modes,
4980 	.destroy = amdgpu_dm_connector_destroy,
4981 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4982 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4983 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4984 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4985 	.late_register = amdgpu_dm_connector_late_register,
4986 	.early_unregister = amdgpu_dm_connector_unregister
4987 };
4988 
4989 static int get_modes(struct drm_connector *connector)
4990 {
4991 	return amdgpu_dm_connector_get_modes(connector);
4992 }
4993 
4994 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4995 {
4996 	struct dc_sink_init_data init_params = {
4997 			.link = aconnector->dc_link,
4998 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4999 	};
5000 	struct edid *edid;
5001 
5002 	if (!aconnector->base.edid_blob_ptr) {
5003 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5004 				aconnector->base.name);
5005 
5006 		aconnector->base.force = DRM_FORCE_OFF;
5007 		aconnector->base.override_edid = false;
5008 		return;
5009 	}
5010 
5011 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5012 
5013 	aconnector->edid = edid;
5014 
5015 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5016 		aconnector->dc_link,
5017 		(uint8_t *)edid,
5018 		(edid->extensions + 1) * EDID_LENGTH,
5019 		&init_params);
5020 
5021 	if (aconnector->base.force == DRM_FORCE_ON) {
5022 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5023 		aconnector->dc_link->local_sink :
5024 		aconnector->dc_em_sink;
5025 		dc_sink_retain(aconnector->dc_sink);
5026 	}
5027 }
5028 
5029 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5030 {
5031 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5032 
5033 	/*
5034 	 * In case of headless boot with force on for DP managed connector
5035 	 * Those settings have to be != 0 to get initial modeset
5036 	 */
5037 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5038 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5039 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5040 	}
5041 
5042 
5043 	aconnector->base.override_edid = true;
5044 	create_eml_sink(aconnector);
5045 }
5046 
5047 static struct dc_stream_state *
5048 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5049 				const struct drm_display_mode *drm_mode,
5050 				const struct dm_connector_state *dm_state,
5051 				const struct dc_stream_state *old_stream)
5052 {
5053 	struct drm_connector *connector = &aconnector->base;
5054 	struct amdgpu_device *adev = connector->dev->dev_private;
5055 	struct dc_stream_state *stream;
5056 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5057 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5058 	enum dc_status dc_result = DC_OK;
5059 
5060 	do {
5061 		stream = create_stream_for_sink(aconnector, drm_mode,
5062 						dm_state, old_stream,
5063 						requested_bpc);
5064 		if (stream == NULL) {
5065 			DRM_ERROR("Failed to create stream for sink!\n");
5066 			break;
5067 		}
5068 
5069 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5070 
5071 		if (dc_result != DC_OK) {
5072 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5073 				      drm_mode->hdisplay,
5074 				      drm_mode->vdisplay,
5075 				      drm_mode->clock,
5076 				      dc_result,
5077 				      dc_status_to_str(dc_result));
5078 
5079 			dc_stream_release(stream);
5080 			stream = NULL;
5081 			requested_bpc -= 2; /* lower bpc to retry validation */
5082 		}
5083 
5084 	} while (stream == NULL && requested_bpc >= 6);
5085 
5086 	return stream;
5087 }
5088 
5089 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5090 				   struct drm_display_mode *mode)
5091 {
5092 	int result = MODE_ERROR;
5093 	struct dc_sink *dc_sink;
5094 	/* TODO: Unhardcode stream count */
5095 	struct dc_stream_state *stream;
5096 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5097 
5098 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5099 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5100 		return result;
5101 
5102 	/*
5103 	 * Only run this the first time mode_valid is called to initilialize
5104 	 * EDID mgmt
5105 	 */
5106 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5107 		!aconnector->dc_em_sink)
5108 		handle_edid_mgmt(aconnector);
5109 
5110 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5111 
5112 	if (dc_sink == NULL) {
5113 		DRM_ERROR("dc_sink is NULL!\n");
5114 		goto fail;
5115 	}
5116 
5117 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5118 	if (stream) {
5119 		dc_stream_release(stream);
5120 		result = MODE_OK;
5121 	}
5122 
5123 fail:
5124 	/* TODO: error handling*/
5125 	return result;
5126 }
5127 
5128 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5129 				struct dc_info_packet *out)
5130 {
5131 	struct hdmi_drm_infoframe frame;
5132 	unsigned char buf[30]; /* 26 + 4 */
5133 	ssize_t len;
5134 	int ret, i;
5135 
5136 	memset(out, 0, sizeof(*out));
5137 
5138 	if (!state->hdr_output_metadata)
5139 		return 0;
5140 
5141 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5142 	if (ret)
5143 		return ret;
5144 
5145 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5146 	if (len < 0)
5147 		return (int)len;
5148 
5149 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5150 	if (len != 30)
5151 		return -EINVAL;
5152 
5153 	/* Prepare the infopacket for DC. */
5154 	switch (state->connector->connector_type) {
5155 	case DRM_MODE_CONNECTOR_HDMIA:
5156 		out->hb0 = 0x87; /* type */
5157 		out->hb1 = 0x01; /* version */
5158 		out->hb2 = 0x1A; /* length */
5159 		out->sb[0] = buf[3]; /* checksum */
5160 		i = 1;
5161 		break;
5162 
5163 	case DRM_MODE_CONNECTOR_DisplayPort:
5164 	case DRM_MODE_CONNECTOR_eDP:
5165 		out->hb0 = 0x00; /* sdp id, zero */
5166 		out->hb1 = 0x87; /* type */
5167 		out->hb2 = 0x1D; /* payload len - 1 */
5168 		out->hb3 = (0x13 << 2); /* sdp version */
5169 		out->sb[0] = 0x01; /* version */
5170 		out->sb[1] = 0x1A; /* length */
5171 		i = 2;
5172 		break;
5173 
5174 	default:
5175 		return -EINVAL;
5176 	}
5177 
5178 	memcpy(&out->sb[i], &buf[4], 26);
5179 	out->valid = true;
5180 
5181 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5182 		       sizeof(out->sb), false);
5183 
5184 	return 0;
5185 }
5186 
5187 static bool
5188 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5189 			  const struct drm_connector_state *new_state)
5190 {
5191 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5192 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5193 
5194 	if (old_blob != new_blob) {
5195 		if (old_blob && new_blob &&
5196 		    old_blob->length == new_blob->length)
5197 			return memcmp(old_blob->data, new_blob->data,
5198 				      old_blob->length);
5199 
5200 		return true;
5201 	}
5202 
5203 	return false;
5204 }
5205 
5206 static int
5207 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5208 				 struct drm_atomic_state *state)
5209 {
5210 	struct drm_connector_state *new_con_state =
5211 		drm_atomic_get_new_connector_state(state, conn);
5212 	struct drm_connector_state *old_con_state =
5213 		drm_atomic_get_old_connector_state(state, conn);
5214 	struct drm_crtc *crtc = new_con_state->crtc;
5215 	struct drm_crtc_state *new_crtc_state;
5216 	int ret;
5217 
5218 	if (!crtc)
5219 		return 0;
5220 
5221 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5222 		struct dc_info_packet hdr_infopacket;
5223 
5224 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5225 		if (ret)
5226 			return ret;
5227 
5228 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5229 		if (IS_ERR(new_crtc_state))
5230 			return PTR_ERR(new_crtc_state);
5231 
5232 		/*
5233 		 * DC considers the stream backends changed if the
5234 		 * static metadata changes. Forcing the modeset also
5235 		 * gives a simple way for userspace to switch from
5236 		 * 8bpc to 10bpc when setting the metadata to enter
5237 		 * or exit HDR.
5238 		 *
5239 		 * Changing the static metadata after it's been
5240 		 * set is permissible, however. So only force a
5241 		 * modeset if we're entering or exiting HDR.
5242 		 */
5243 		new_crtc_state->mode_changed =
5244 			!old_con_state->hdr_output_metadata ||
5245 			!new_con_state->hdr_output_metadata;
5246 	}
5247 
5248 	return 0;
5249 }
5250 
5251 static const struct drm_connector_helper_funcs
5252 amdgpu_dm_connector_helper_funcs = {
5253 	/*
5254 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5255 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5256 	 * are missing after user start lightdm. So we need to renew modes list.
5257 	 * in get_modes call back, not just return the modes count
5258 	 */
5259 	.get_modes = get_modes,
5260 	.mode_valid = amdgpu_dm_connector_mode_valid,
5261 	.atomic_check = amdgpu_dm_connector_atomic_check,
5262 };
5263 
5264 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5265 {
5266 }
5267 
5268 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5269 {
5270 	struct drm_device *dev = new_crtc_state->crtc->dev;
5271 	struct drm_plane *plane;
5272 
5273 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5274 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5275 			return true;
5276 	}
5277 
5278 	return false;
5279 }
5280 
5281 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5282 {
5283 	struct drm_atomic_state *state = new_crtc_state->state;
5284 	struct drm_plane *plane;
5285 	int num_active = 0;
5286 
5287 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5288 		struct drm_plane_state *new_plane_state;
5289 
5290 		/* Cursor planes are "fake". */
5291 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5292 			continue;
5293 
5294 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5295 
5296 		if (!new_plane_state) {
5297 			/*
5298 			 * The plane is enable on the CRTC and hasn't changed
5299 			 * state. This means that it previously passed
5300 			 * validation and is therefore enabled.
5301 			 */
5302 			num_active += 1;
5303 			continue;
5304 		}
5305 
5306 		/* We need a framebuffer to be considered enabled. */
5307 		num_active += (new_plane_state->fb != NULL);
5308 	}
5309 
5310 	return num_active;
5311 }
5312 
5313 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5314 					 struct drm_crtc_state *new_crtc_state)
5315 {
5316 	struct dm_crtc_state *dm_new_crtc_state =
5317 		to_dm_crtc_state(new_crtc_state);
5318 
5319 	dm_new_crtc_state->active_planes = 0;
5320 
5321 	if (!dm_new_crtc_state->stream)
5322 		return;
5323 
5324 	dm_new_crtc_state->active_planes =
5325 		count_crtc_active_planes(new_crtc_state);
5326 }
5327 
5328 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5329 				       struct drm_crtc_state *state)
5330 {
5331 	struct amdgpu_device *adev = crtc->dev->dev_private;
5332 	struct dc *dc = adev->dm.dc;
5333 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5334 	int ret = -EINVAL;
5335 
5336 	dm_update_crtc_active_planes(crtc, state);
5337 
5338 	if (unlikely(!dm_crtc_state->stream &&
5339 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5340 		WARN_ON(1);
5341 		return ret;
5342 	}
5343 
5344 	/* In some use cases, like reset, no stream is attached */
5345 	if (!dm_crtc_state->stream)
5346 		return 0;
5347 
5348 	/*
5349 	 * We want at least one hardware plane enabled to use
5350 	 * the stream with a cursor enabled.
5351 	 */
5352 	if (state->enable && state->active &&
5353 	    does_crtc_have_active_cursor(state) &&
5354 	    dm_crtc_state->active_planes == 0)
5355 		return -EINVAL;
5356 
5357 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5358 		return 0;
5359 
5360 	return ret;
5361 }
5362 
5363 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5364 				      const struct drm_display_mode *mode,
5365 				      struct drm_display_mode *adjusted_mode)
5366 {
5367 	return true;
5368 }
5369 
5370 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5371 	.disable = dm_crtc_helper_disable,
5372 	.atomic_check = dm_crtc_helper_atomic_check,
5373 	.mode_fixup = dm_crtc_helper_mode_fixup,
5374 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5375 };
5376 
5377 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5378 {
5379 
5380 }
5381 
5382 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5383 {
5384 	switch (display_color_depth) {
5385 		case COLOR_DEPTH_666:
5386 			return 6;
5387 		case COLOR_DEPTH_888:
5388 			return 8;
5389 		case COLOR_DEPTH_101010:
5390 			return 10;
5391 		case COLOR_DEPTH_121212:
5392 			return 12;
5393 		case COLOR_DEPTH_141414:
5394 			return 14;
5395 		case COLOR_DEPTH_161616:
5396 			return 16;
5397 		default:
5398 			break;
5399 		}
5400 	return 0;
5401 }
5402 
5403 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5404 					  struct drm_crtc_state *crtc_state,
5405 					  struct drm_connector_state *conn_state)
5406 {
5407 	struct drm_atomic_state *state = crtc_state->state;
5408 	struct drm_connector *connector = conn_state->connector;
5409 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5410 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5411 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5412 	struct drm_dp_mst_topology_mgr *mst_mgr;
5413 	struct drm_dp_mst_port *mst_port;
5414 	enum dc_color_depth color_depth;
5415 	int clock, bpp = 0;
5416 	bool is_y420 = false;
5417 
5418 	if (!aconnector->port || !aconnector->dc_sink)
5419 		return 0;
5420 
5421 	mst_port = aconnector->port;
5422 	mst_mgr = &aconnector->mst_port->mst_mgr;
5423 
5424 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5425 		return 0;
5426 
5427 	if (!state->duplicated) {
5428 		int max_bpc = conn_state->max_requested_bpc;
5429 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5430 				aconnector->force_yuv420_output;
5431 		color_depth = convert_color_depth_from_display_info(connector,
5432 								    is_y420,
5433 								    max_bpc);
5434 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5435 		clock = adjusted_mode->clock;
5436 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5437 	}
5438 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5439 									   mst_mgr,
5440 									   mst_port,
5441 									   dm_new_connector_state->pbn,
5442 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5443 	if (dm_new_connector_state->vcpi_slots < 0) {
5444 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5445 		return dm_new_connector_state->vcpi_slots;
5446 	}
5447 	return 0;
5448 }
5449 
5450 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5451 	.disable = dm_encoder_helper_disable,
5452 	.atomic_check = dm_encoder_helper_atomic_check
5453 };
5454 
5455 #if defined(CONFIG_DRM_AMD_DC_DCN)
5456 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5457 					    struct dc_state *dc_state)
5458 {
5459 	struct dc_stream_state *stream = NULL;
5460 	struct drm_connector *connector;
5461 	struct drm_connector_state *new_con_state, *old_con_state;
5462 	struct amdgpu_dm_connector *aconnector;
5463 	struct dm_connector_state *dm_conn_state;
5464 	int i, j, clock, bpp;
5465 	int vcpi, pbn_div, pbn = 0;
5466 
5467 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5468 
5469 		aconnector = to_amdgpu_dm_connector(connector);
5470 
5471 		if (!aconnector->port)
5472 			continue;
5473 
5474 		if (!new_con_state || !new_con_state->crtc)
5475 			continue;
5476 
5477 		dm_conn_state = to_dm_connector_state(new_con_state);
5478 
5479 		for (j = 0; j < dc_state->stream_count; j++) {
5480 			stream = dc_state->streams[j];
5481 			if (!stream)
5482 				continue;
5483 
5484 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5485 				break;
5486 
5487 			stream = NULL;
5488 		}
5489 
5490 		if (!stream)
5491 			continue;
5492 
5493 		if (stream->timing.flags.DSC != 1) {
5494 			drm_dp_mst_atomic_enable_dsc(state,
5495 						     aconnector->port,
5496 						     dm_conn_state->pbn,
5497 						     0,
5498 						     false);
5499 			continue;
5500 		}
5501 
5502 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5503 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5504 		clock = stream->timing.pix_clk_100hz / 10;
5505 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5506 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5507 						    aconnector->port,
5508 						    pbn, pbn_div,
5509 						    true);
5510 		if (vcpi < 0)
5511 			return vcpi;
5512 
5513 		dm_conn_state->pbn = pbn;
5514 		dm_conn_state->vcpi_slots = vcpi;
5515 	}
5516 	return 0;
5517 }
5518 #endif
5519 
5520 static void dm_drm_plane_reset(struct drm_plane *plane)
5521 {
5522 	struct dm_plane_state *amdgpu_state = NULL;
5523 
5524 	if (plane->state)
5525 		plane->funcs->atomic_destroy_state(plane, plane->state);
5526 
5527 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5528 	WARN_ON(amdgpu_state == NULL);
5529 
5530 	if (amdgpu_state)
5531 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5532 }
5533 
5534 static struct drm_plane_state *
5535 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5536 {
5537 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5538 
5539 	old_dm_plane_state = to_dm_plane_state(plane->state);
5540 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5541 	if (!dm_plane_state)
5542 		return NULL;
5543 
5544 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5545 
5546 	if (old_dm_plane_state->dc_state) {
5547 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5548 		dc_plane_state_retain(dm_plane_state->dc_state);
5549 	}
5550 
5551 	return &dm_plane_state->base;
5552 }
5553 
5554 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5555 				struct drm_plane_state *state)
5556 {
5557 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5558 
5559 	if (dm_plane_state->dc_state)
5560 		dc_plane_state_release(dm_plane_state->dc_state);
5561 
5562 	drm_atomic_helper_plane_destroy_state(plane, state);
5563 }
5564 
5565 static const struct drm_plane_funcs dm_plane_funcs = {
5566 	.update_plane	= drm_atomic_helper_update_plane,
5567 	.disable_plane	= drm_atomic_helper_disable_plane,
5568 	.destroy	= drm_primary_helper_destroy,
5569 	.reset = dm_drm_plane_reset,
5570 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5571 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5572 };
5573 
5574 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5575 				      struct drm_plane_state *new_state)
5576 {
5577 	struct amdgpu_framebuffer *afb;
5578 	struct drm_gem_object *obj;
5579 	struct amdgpu_device *adev;
5580 	struct amdgpu_bo *rbo;
5581 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5582 	struct list_head list;
5583 	struct ttm_validate_buffer tv;
5584 	struct ww_acquire_ctx ticket;
5585 	uint64_t tiling_flags;
5586 	uint32_t domain;
5587 	int r;
5588 	bool tmz_surface = false;
5589 	bool force_disable_dcc = false;
5590 
5591 	dm_plane_state_old = to_dm_plane_state(plane->state);
5592 	dm_plane_state_new = to_dm_plane_state(new_state);
5593 
5594 	if (!new_state->fb) {
5595 		DRM_DEBUG_DRIVER("No FB bound\n");
5596 		return 0;
5597 	}
5598 
5599 	afb = to_amdgpu_framebuffer(new_state->fb);
5600 	obj = new_state->fb->obj[0];
5601 	rbo = gem_to_amdgpu_bo(obj);
5602 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5603 	INIT_LIST_HEAD(&list);
5604 
5605 	tv.bo = &rbo->tbo;
5606 	tv.num_shared = 1;
5607 	list_add(&tv.head, &list);
5608 
5609 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5610 	if (r) {
5611 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5612 		return r;
5613 	}
5614 
5615 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5616 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5617 	else
5618 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5619 
5620 	r = amdgpu_bo_pin(rbo, domain);
5621 	if (unlikely(r != 0)) {
5622 		if (r != -ERESTARTSYS)
5623 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5624 		ttm_eu_backoff_reservation(&ticket, &list);
5625 		return r;
5626 	}
5627 
5628 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5629 	if (unlikely(r != 0)) {
5630 		amdgpu_bo_unpin(rbo);
5631 		ttm_eu_backoff_reservation(&ticket, &list);
5632 		DRM_ERROR("%p bind failed\n", rbo);
5633 		return r;
5634 	}
5635 
5636 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5637 
5638 	tmz_surface = amdgpu_bo_encrypted(rbo);
5639 
5640 	ttm_eu_backoff_reservation(&ticket, &list);
5641 
5642 	afb->address = amdgpu_bo_gpu_offset(rbo);
5643 
5644 	amdgpu_bo_ref(rbo);
5645 
5646 	if (dm_plane_state_new->dc_state &&
5647 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5648 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5649 
5650 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5651 		fill_plane_buffer_attributes(
5652 			adev, afb, plane_state->format, plane_state->rotation,
5653 			tiling_flags, &plane_state->tiling_info,
5654 			&plane_state->plane_size, &plane_state->dcc,
5655 			&plane_state->address, tmz_surface,
5656 			force_disable_dcc);
5657 	}
5658 
5659 	return 0;
5660 }
5661 
5662 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5663 				       struct drm_plane_state *old_state)
5664 {
5665 	struct amdgpu_bo *rbo;
5666 	int r;
5667 
5668 	if (!old_state->fb)
5669 		return;
5670 
5671 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5672 	r = amdgpu_bo_reserve(rbo, false);
5673 	if (unlikely(r)) {
5674 		DRM_ERROR("failed to reserve rbo before unpin\n");
5675 		return;
5676 	}
5677 
5678 	amdgpu_bo_unpin(rbo);
5679 	amdgpu_bo_unreserve(rbo);
5680 	amdgpu_bo_unref(&rbo);
5681 }
5682 
5683 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5684 				       struct drm_crtc_state *new_crtc_state)
5685 {
5686 	int max_downscale = 0;
5687 	int max_upscale = INT_MAX;
5688 
5689 	/* TODO: These should be checked against DC plane caps */
5690 	return drm_atomic_helper_check_plane_state(
5691 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5692 }
5693 
5694 static int dm_plane_atomic_check(struct drm_plane *plane,
5695 				 struct drm_plane_state *state)
5696 {
5697 	struct amdgpu_device *adev = plane->dev->dev_private;
5698 	struct dc *dc = adev->dm.dc;
5699 	struct dm_plane_state *dm_plane_state;
5700 	struct dc_scaling_info scaling_info;
5701 	struct drm_crtc_state *new_crtc_state;
5702 	int ret;
5703 
5704 	dm_plane_state = to_dm_plane_state(state);
5705 
5706 	if (!dm_plane_state->dc_state)
5707 		return 0;
5708 
5709 	new_crtc_state =
5710 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5711 	if (!new_crtc_state)
5712 		return -EINVAL;
5713 
5714 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5715 	if (ret)
5716 		return ret;
5717 
5718 	ret = fill_dc_scaling_info(state, &scaling_info);
5719 	if (ret)
5720 		return ret;
5721 
5722 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5723 		return 0;
5724 
5725 	return -EINVAL;
5726 }
5727 
5728 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5729 				       struct drm_plane_state *new_plane_state)
5730 {
5731 	/* Only support async updates on cursor planes. */
5732 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5733 		return -EINVAL;
5734 
5735 	return 0;
5736 }
5737 
5738 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5739 					 struct drm_plane_state *new_state)
5740 {
5741 	struct drm_plane_state *old_state =
5742 		drm_atomic_get_old_plane_state(new_state->state, plane);
5743 
5744 	swap(plane->state->fb, new_state->fb);
5745 
5746 	plane->state->src_x = new_state->src_x;
5747 	plane->state->src_y = new_state->src_y;
5748 	plane->state->src_w = new_state->src_w;
5749 	plane->state->src_h = new_state->src_h;
5750 	plane->state->crtc_x = new_state->crtc_x;
5751 	plane->state->crtc_y = new_state->crtc_y;
5752 	plane->state->crtc_w = new_state->crtc_w;
5753 	plane->state->crtc_h = new_state->crtc_h;
5754 
5755 	handle_cursor_update(plane, old_state);
5756 }
5757 
5758 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5759 	.prepare_fb = dm_plane_helper_prepare_fb,
5760 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5761 	.atomic_check = dm_plane_atomic_check,
5762 	.atomic_async_check = dm_plane_atomic_async_check,
5763 	.atomic_async_update = dm_plane_atomic_async_update
5764 };
5765 
5766 /*
5767  * TODO: these are currently initialized to rgb formats only.
5768  * For future use cases we should either initialize them dynamically based on
5769  * plane capabilities, or initialize this array to all formats, so internal drm
5770  * check will succeed, and let DC implement proper check
5771  */
5772 static const uint32_t rgb_formats[] = {
5773 	DRM_FORMAT_XRGB8888,
5774 	DRM_FORMAT_ARGB8888,
5775 	DRM_FORMAT_RGBA8888,
5776 	DRM_FORMAT_XRGB2101010,
5777 	DRM_FORMAT_XBGR2101010,
5778 	DRM_FORMAT_ARGB2101010,
5779 	DRM_FORMAT_ABGR2101010,
5780 	DRM_FORMAT_XBGR8888,
5781 	DRM_FORMAT_ABGR8888,
5782 	DRM_FORMAT_RGB565,
5783 };
5784 
5785 static const uint32_t overlay_formats[] = {
5786 	DRM_FORMAT_XRGB8888,
5787 	DRM_FORMAT_ARGB8888,
5788 	DRM_FORMAT_RGBA8888,
5789 	DRM_FORMAT_XBGR8888,
5790 	DRM_FORMAT_ABGR8888,
5791 	DRM_FORMAT_RGB565
5792 };
5793 
5794 static const u32 cursor_formats[] = {
5795 	DRM_FORMAT_ARGB8888
5796 };
5797 
5798 static int get_plane_formats(const struct drm_plane *plane,
5799 			     const struct dc_plane_cap *plane_cap,
5800 			     uint32_t *formats, int max_formats)
5801 {
5802 	int i, num_formats = 0;
5803 
5804 	/*
5805 	 * TODO: Query support for each group of formats directly from
5806 	 * DC plane caps. This will require adding more formats to the
5807 	 * caps list.
5808 	 */
5809 
5810 	switch (plane->type) {
5811 	case DRM_PLANE_TYPE_PRIMARY:
5812 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5813 			if (num_formats >= max_formats)
5814 				break;
5815 
5816 			formats[num_formats++] = rgb_formats[i];
5817 		}
5818 
5819 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5820 			formats[num_formats++] = DRM_FORMAT_NV12;
5821 		if (plane_cap && plane_cap->pixel_format_support.p010)
5822 			formats[num_formats++] = DRM_FORMAT_P010;
5823 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5824 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5825 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5826 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5827 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5828 		}
5829 		break;
5830 
5831 	case DRM_PLANE_TYPE_OVERLAY:
5832 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5833 			if (num_formats >= max_formats)
5834 				break;
5835 
5836 			formats[num_formats++] = overlay_formats[i];
5837 		}
5838 		break;
5839 
5840 	case DRM_PLANE_TYPE_CURSOR:
5841 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5842 			if (num_formats >= max_formats)
5843 				break;
5844 
5845 			formats[num_formats++] = cursor_formats[i];
5846 		}
5847 		break;
5848 	}
5849 
5850 	return num_formats;
5851 }
5852 
5853 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5854 				struct drm_plane *plane,
5855 				unsigned long possible_crtcs,
5856 				const struct dc_plane_cap *plane_cap)
5857 {
5858 	uint32_t formats[32];
5859 	int num_formats;
5860 	int res = -EPERM;
5861 	unsigned int supported_rotations;
5862 
5863 	num_formats = get_plane_formats(plane, plane_cap, formats,
5864 					ARRAY_SIZE(formats));
5865 
5866 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5867 				       &dm_plane_funcs, formats, num_formats,
5868 				       NULL, plane->type, NULL);
5869 	if (res)
5870 		return res;
5871 
5872 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5873 	    plane_cap && plane_cap->per_pixel_alpha) {
5874 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5875 					  BIT(DRM_MODE_BLEND_PREMULTI);
5876 
5877 		drm_plane_create_alpha_property(plane);
5878 		drm_plane_create_blend_mode_property(plane, blend_caps);
5879 	}
5880 
5881 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5882 	    plane_cap &&
5883 	    (plane_cap->pixel_format_support.nv12 ||
5884 	     plane_cap->pixel_format_support.p010)) {
5885 		/* This only affects YUV formats. */
5886 		drm_plane_create_color_properties(
5887 			plane,
5888 			BIT(DRM_COLOR_YCBCR_BT601) |
5889 			BIT(DRM_COLOR_YCBCR_BT709) |
5890 			BIT(DRM_COLOR_YCBCR_BT2020),
5891 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5892 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5893 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5894 	}
5895 
5896 	supported_rotations =
5897 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5898 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5899 
5900 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5901 					   supported_rotations);
5902 
5903 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5904 
5905 	/* Create (reset) the plane state */
5906 	if (plane->funcs->reset)
5907 		plane->funcs->reset(plane);
5908 
5909 	return 0;
5910 }
5911 
5912 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5913 			       struct drm_plane *plane,
5914 			       uint32_t crtc_index)
5915 {
5916 	struct amdgpu_crtc *acrtc = NULL;
5917 	struct drm_plane *cursor_plane;
5918 
5919 	int res = -ENOMEM;
5920 
5921 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5922 	if (!cursor_plane)
5923 		goto fail;
5924 
5925 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5926 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5927 
5928 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5929 	if (!acrtc)
5930 		goto fail;
5931 
5932 	res = drm_crtc_init_with_planes(
5933 			dm->ddev,
5934 			&acrtc->base,
5935 			plane,
5936 			cursor_plane,
5937 			&amdgpu_dm_crtc_funcs, NULL);
5938 
5939 	if (res)
5940 		goto fail;
5941 
5942 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5943 
5944 	/* Create (reset) the plane state */
5945 	if (acrtc->base.funcs->reset)
5946 		acrtc->base.funcs->reset(&acrtc->base);
5947 
5948 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5949 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5950 
5951 	acrtc->crtc_id = crtc_index;
5952 	acrtc->base.enabled = false;
5953 	acrtc->otg_inst = -1;
5954 
5955 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5956 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5957 				   true, MAX_COLOR_LUT_ENTRIES);
5958 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5959 
5960 	return 0;
5961 
5962 fail:
5963 	kfree(acrtc);
5964 	kfree(cursor_plane);
5965 	return res;
5966 }
5967 
5968 
5969 static int to_drm_connector_type(enum signal_type st)
5970 {
5971 	switch (st) {
5972 	case SIGNAL_TYPE_HDMI_TYPE_A:
5973 		return DRM_MODE_CONNECTOR_HDMIA;
5974 	case SIGNAL_TYPE_EDP:
5975 		return DRM_MODE_CONNECTOR_eDP;
5976 	case SIGNAL_TYPE_LVDS:
5977 		return DRM_MODE_CONNECTOR_LVDS;
5978 	case SIGNAL_TYPE_RGB:
5979 		return DRM_MODE_CONNECTOR_VGA;
5980 	case SIGNAL_TYPE_DISPLAY_PORT:
5981 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5982 		return DRM_MODE_CONNECTOR_DisplayPort;
5983 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5984 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5985 		return DRM_MODE_CONNECTOR_DVID;
5986 	case SIGNAL_TYPE_VIRTUAL:
5987 		return DRM_MODE_CONNECTOR_VIRTUAL;
5988 
5989 	default:
5990 		return DRM_MODE_CONNECTOR_Unknown;
5991 	}
5992 }
5993 
5994 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5995 {
5996 	struct drm_encoder *encoder;
5997 
5998 	/* There is only one encoder per connector */
5999 	drm_connector_for_each_possible_encoder(connector, encoder)
6000 		return encoder;
6001 
6002 	return NULL;
6003 }
6004 
6005 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6006 {
6007 	struct drm_encoder *encoder;
6008 	struct amdgpu_encoder *amdgpu_encoder;
6009 
6010 	encoder = amdgpu_dm_connector_to_encoder(connector);
6011 
6012 	if (encoder == NULL)
6013 		return;
6014 
6015 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6016 
6017 	amdgpu_encoder->native_mode.clock = 0;
6018 
6019 	if (!list_empty(&connector->probed_modes)) {
6020 		struct drm_display_mode *preferred_mode = NULL;
6021 
6022 		list_for_each_entry(preferred_mode,
6023 				    &connector->probed_modes,
6024 				    head) {
6025 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6026 				amdgpu_encoder->native_mode = *preferred_mode;
6027 
6028 			break;
6029 		}
6030 
6031 	}
6032 }
6033 
6034 static struct drm_display_mode *
6035 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6036 			     char *name,
6037 			     int hdisplay, int vdisplay)
6038 {
6039 	struct drm_device *dev = encoder->dev;
6040 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6041 	struct drm_display_mode *mode = NULL;
6042 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6043 
6044 	mode = drm_mode_duplicate(dev, native_mode);
6045 
6046 	if (mode == NULL)
6047 		return NULL;
6048 
6049 	mode->hdisplay = hdisplay;
6050 	mode->vdisplay = vdisplay;
6051 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6052 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6053 
6054 	return mode;
6055 
6056 }
6057 
6058 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6059 						 struct drm_connector *connector)
6060 {
6061 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6062 	struct drm_display_mode *mode = NULL;
6063 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6064 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6065 				to_amdgpu_dm_connector(connector);
6066 	int i;
6067 	int n;
6068 	struct mode_size {
6069 		char name[DRM_DISPLAY_MODE_LEN];
6070 		int w;
6071 		int h;
6072 	} common_modes[] = {
6073 		{  "640x480",  640,  480},
6074 		{  "800x600",  800,  600},
6075 		{ "1024x768", 1024,  768},
6076 		{ "1280x720", 1280,  720},
6077 		{ "1280x800", 1280,  800},
6078 		{"1280x1024", 1280, 1024},
6079 		{ "1440x900", 1440,  900},
6080 		{"1680x1050", 1680, 1050},
6081 		{"1600x1200", 1600, 1200},
6082 		{"1920x1080", 1920, 1080},
6083 		{"1920x1200", 1920, 1200}
6084 	};
6085 
6086 	n = ARRAY_SIZE(common_modes);
6087 
6088 	for (i = 0; i < n; i++) {
6089 		struct drm_display_mode *curmode = NULL;
6090 		bool mode_existed = false;
6091 
6092 		if (common_modes[i].w > native_mode->hdisplay ||
6093 		    common_modes[i].h > native_mode->vdisplay ||
6094 		   (common_modes[i].w == native_mode->hdisplay &&
6095 		    common_modes[i].h == native_mode->vdisplay))
6096 			continue;
6097 
6098 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6099 			if (common_modes[i].w == curmode->hdisplay &&
6100 			    common_modes[i].h == curmode->vdisplay) {
6101 				mode_existed = true;
6102 				break;
6103 			}
6104 		}
6105 
6106 		if (mode_existed)
6107 			continue;
6108 
6109 		mode = amdgpu_dm_create_common_mode(encoder,
6110 				common_modes[i].name, common_modes[i].w,
6111 				common_modes[i].h);
6112 		drm_mode_probed_add(connector, mode);
6113 		amdgpu_dm_connector->num_modes++;
6114 	}
6115 }
6116 
6117 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6118 					      struct edid *edid)
6119 {
6120 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6121 			to_amdgpu_dm_connector(connector);
6122 
6123 	if (edid) {
6124 		/* empty probed_modes */
6125 		INIT_LIST_HEAD(&connector->probed_modes);
6126 		amdgpu_dm_connector->num_modes =
6127 				drm_add_edid_modes(connector, edid);
6128 
6129 		/* sorting the probed modes before calling function
6130 		 * amdgpu_dm_get_native_mode() since EDID can have
6131 		 * more than one preferred mode. The modes that are
6132 		 * later in the probed mode list could be of higher
6133 		 * and preferred resolution. For example, 3840x2160
6134 		 * resolution in base EDID preferred timing and 4096x2160
6135 		 * preferred resolution in DID extension block later.
6136 		 */
6137 		drm_mode_sort(&connector->probed_modes);
6138 		amdgpu_dm_get_native_mode(connector);
6139 	} else {
6140 		amdgpu_dm_connector->num_modes = 0;
6141 	}
6142 }
6143 
6144 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6145 {
6146 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6147 			to_amdgpu_dm_connector(connector);
6148 	struct drm_encoder *encoder;
6149 	struct edid *edid = amdgpu_dm_connector->edid;
6150 
6151 	encoder = amdgpu_dm_connector_to_encoder(connector);
6152 
6153 	if (!edid || !drm_edid_is_valid(edid)) {
6154 		amdgpu_dm_connector->num_modes =
6155 				drm_add_modes_noedid(connector, 640, 480);
6156 	} else {
6157 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6158 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6159 	}
6160 	amdgpu_dm_fbc_init(connector);
6161 
6162 	return amdgpu_dm_connector->num_modes;
6163 }
6164 
6165 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6166 				     struct amdgpu_dm_connector *aconnector,
6167 				     int connector_type,
6168 				     struct dc_link *link,
6169 				     int link_index)
6170 {
6171 	struct amdgpu_device *adev = dm->ddev->dev_private;
6172 
6173 	/*
6174 	 * Some of the properties below require access to state, like bpc.
6175 	 * Allocate some default initial connector state with our reset helper.
6176 	 */
6177 	if (aconnector->base.funcs->reset)
6178 		aconnector->base.funcs->reset(&aconnector->base);
6179 
6180 	aconnector->connector_id = link_index;
6181 	aconnector->dc_link = link;
6182 	aconnector->base.interlace_allowed = false;
6183 	aconnector->base.doublescan_allowed = false;
6184 	aconnector->base.stereo_allowed = false;
6185 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6186 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6187 	aconnector->audio_inst = -1;
6188 	mutex_init(&aconnector->hpd_lock);
6189 
6190 	/*
6191 	 * configure support HPD hot plug connector_>polled default value is 0
6192 	 * which means HPD hot plug not supported
6193 	 */
6194 	switch (connector_type) {
6195 	case DRM_MODE_CONNECTOR_HDMIA:
6196 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6197 		aconnector->base.ycbcr_420_allowed =
6198 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6199 		break;
6200 	case DRM_MODE_CONNECTOR_DisplayPort:
6201 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6202 		aconnector->base.ycbcr_420_allowed =
6203 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6204 		break;
6205 	case DRM_MODE_CONNECTOR_DVID:
6206 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6207 		break;
6208 	default:
6209 		break;
6210 	}
6211 
6212 	drm_object_attach_property(&aconnector->base.base,
6213 				dm->ddev->mode_config.scaling_mode_property,
6214 				DRM_MODE_SCALE_NONE);
6215 
6216 	drm_object_attach_property(&aconnector->base.base,
6217 				adev->mode_info.underscan_property,
6218 				UNDERSCAN_OFF);
6219 	drm_object_attach_property(&aconnector->base.base,
6220 				adev->mode_info.underscan_hborder_property,
6221 				0);
6222 	drm_object_attach_property(&aconnector->base.base,
6223 				adev->mode_info.underscan_vborder_property,
6224 				0);
6225 
6226 	if (!aconnector->mst_port)
6227 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6228 
6229 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6230 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6231 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6232 
6233 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6234 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6235 		drm_object_attach_property(&aconnector->base.base,
6236 				adev->mode_info.abm_level_property, 0);
6237 	}
6238 
6239 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6240 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6241 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6242 		drm_object_attach_property(
6243 			&aconnector->base.base,
6244 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6245 
6246 		if (!aconnector->mst_port)
6247 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6248 
6249 #ifdef CONFIG_DRM_AMD_DC_HDCP
6250 		if (adev->dm.hdcp_workqueue)
6251 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6252 #endif
6253 	}
6254 }
6255 
6256 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6257 			      struct i2c_msg *msgs, int num)
6258 {
6259 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6260 	struct ddc_service *ddc_service = i2c->ddc_service;
6261 	struct i2c_command cmd;
6262 	int i;
6263 	int result = -EIO;
6264 
6265 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6266 
6267 	if (!cmd.payloads)
6268 		return result;
6269 
6270 	cmd.number_of_payloads = num;
6271 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6272 	cmd.speed = 100;
6273 
6274 	for (i = 0; i < num; i++) {
6275 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6276 		cmd.payloads[i].address = msgs[i].addr;
6277 		cmd.payloads[i].length = msgs[i].len;
6278 		cmd.payloads[i].data = msgs[i].buf;
6279 	}
6280 
6281 	if (dc_submit_i2c(
6282 			ddc_service->ctx->dc,
6283 			ddc_service->ddc_pin->hw_info.ddc_channel,
6284 			&cmd))
6285 		result = num;
6286 
6287 	kfree(cmd.payloads);
6288 	return result;
6289 }
6290 
6291 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6292 {
6293 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6294 }
6295 
6296 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6297 	.master_xfer = amdgpu_dm_i2c_xfer,
6298 	.functionality = amdgpu_dm_i2c_func,
6299 };
6300 
6301 static struct amdgpu_i2c_adapter *
6302 create_i2c(struct ddc_service *ddc_service,
6303 	   int link_index,
6304 	   int *res)
6305 {
6306 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6307 	struct amdgpu_i2c_adapter *i2c;
6308 
6309 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6310 	if (!i2c)
6311 		return NULL;
6312 	i2c->base.owner = THIS_MODULE;
6313 	i2c->base.class = I2C_CLASS_DDC;
6314 	i2c->base.dev.parent = &adev->pdev->dev;
6315 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6316 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6317 	i2c_set_adapdata(&i2c->base, i2c);
6318 	i2c->ddc_service = ddc_service;
6319 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6320 
6321 	return i2c;
6322 }
6323 
6324 
6325 /*
6326  * Note: this function assumes that dc_link_detect() was called for the
6327  * dc_link which will be represented by this aconnector.
6328  */
6329 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6330 				    struct amdgpu_dm_connector *aconnector,
6331 				    uint32_t link_index,
6332 				    struct amdgpu_encoder *aencoder)
6333 {
6334 	int res = 0;
6335 	int connector_type;
6336 	struct dc *dc = dm->dc;
6337 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6338 	struct amdgpu_i2c_adapter *i2c;
6339 
6340 	link->priv = aconnector;
6341 
6342 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6343 
6344 	i2c = create_i2c(link->ddc, link->link_index, &res);
6345 	if (!i2c) {
6346 		DRM_ERROR("Failed to create i2c adapter data\n");
6347 		return -ENOMEM;
6348 	}
6349 
6350 	aconnector->i2c = i2c;
6351 	res = i2c_add_adapter(&i2c->base);
6352 
6353 	if (res) {
6354 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6355 		goto out_free;
6356 	}
6357 
6358 	connector_type = to_drm_connector_type(link->connector_signal);
6359 
6360 	res = drm_connector_init_with_ddc(
6361 			dm->ddev,
6362 			&aconnector->base,
6363 			&amdgpu_dm_connector_funcs,
6364 			connector_type,
6365 			&i2c->base);
6366 
6367 	if (res) {
6368 		DRM_ERROR("connector_init failed\n");
6369 		aconnector->connector_id = -1;
6370 		goto out_free;
6371 	}
6372 
6373 	drm_connector_helper_add(
6374 			&aconnector->base,
6375 			&amdgpu_dm_connector_helper_funcs);
6376 
6377 	amdgpu_dm_connector_init_helper(
6378 		dm,
6379 		aconnector,
6380 		connector_type,
6381 		link,
6382 		link_index);
6383 
6384 	drm_connector_attach_encoder(
6385 		&aconnector->base, &aencoder->base);
6386 
6387 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6388 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6389 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6390 
6391 out_free:
6392 	if (res) {
6393 		kfree(i2c);
6394 		aconnector->i2c = NULL;
6395 	}
6396 	return res;
6397 }
6398 
6399 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6400 {
6401 	switch (adev->mode_info.num_crtc) {
6402 	case 1:
6403 		return 0x1;
6404 	case 2:
6405 		return 0x3;
6406 	case 3:
6407 		return 0x7;
6408 	case 4:
6409 		return 0xf;
6410 	case 5:
6411 		return 0x1f;
6412 	case 6:
6413 	default:
6414 		return 0x3f;
6415 	}
6416 }
6417 
6418 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6419 				  struct amdgpu_encoder *aencoder,
6420 				  uint32_t link_index)
6421 {
6422 	struct amdgpu_device *adev = dev->dev_private;
6423 
6424 	int res = drm_encoder_init(dev,
6425 				   &aencoder->base,
6426 				   &amdgpu_dm_encoder_funcs,
6427 				   DRM_MODE_ENCODER_TMDS,
6428 				   NULL);
6429 
6430 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6431 
6432 	if (!res)
6433 		aencoder->encoder_id = link_index;
6434 	else
6435 		aencoder->encoder_id = -1;
6436 
6437 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6438 
6439 	return res;
6440 }
6441 
6442 static void manage_dm_interrupts(struct amdgpu_device *adev,
6443 				 struct amdgpu_crtc *acrtc,
6444 				 bool enable)
6445 {
6446 	/*
6447 	 * We have no guarantee that the frontend index maps to the same
6448 	 * backend index - some even map to more than one.
6449 	 *
6450 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6451 	 */
6452 	int irq_type =
6453 		amdgpu_display_crtc_idx_to_irq_type(
6454 			adev,
6455 			acrtc->crtc_id);
6456 
6457 	if (enable) {
6458 		drm_crtc_vblank_on(&acrtc->base);
6459 		amdgpu_irq_get(
6460 			adev,
6461 			&adev->pageflip_irq,
6462 			irq_type);
6463 	} else {
6464 
6465 		amdgpu_irq_put(
6466 			adev,
6467 			&adev->pageflip_irq,
6468 			irq_type);
6469 		drm_crtc_vblank_off(&acrtc->base);
6470 	}
6471 }
6472 
6473 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6474 				      struct amdgpu_crtc *acrtc)
6475 {
6476 	int irq_type =
6477 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6478 
6479 	/**
6480 	 * This reads the current state for the IRQ and force reapplies
6481 	 * the setting to hardware.
6482 	 */
6483 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6484 }
6485 
6486 static bool
6487 is_scaling_state_different(const struct dm_connector_state *dm_state,
6488 			   const struct dm_connector_state *old_dm_state)
6489 {
6490 	if (dm_state->scaling != old_dm_state->scaling)
6491 		return true;
6492 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6493 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6494 			return true;
6495 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6496 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6497 			return true;
6498 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6499 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6500 		return true;
6501 	return false;
6502 }
6503 
6504 #ifdef CONFIG_DRM_AMD_DC_HDCP
6505 static bool is_content_protection_different(struct drm_connector_state *state,
6506 					    const struct drm_connector_state *old_state,
6507 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6508 {
6509 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6510 
6511 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6512 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6513 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6514 		return true;
6515 	}
6516 
6517 	/* CP is being re enabled, ignore this */
6518 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6519 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6520 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6521 		return false;
6522 	}
6523 
6524 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6525 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6526 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6527 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6528 
6529 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6530 	 * hot-plug, headless s3, dpms
6531 	 */
6532 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6533 	    aconnector->dc_sink != NULL)
6534 		return true;
6535 
6536 	if (old_state->content_protection == state->content_protection)
6537 		return false;
6538 
6539 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6540 		return true;
6541 
6542 	return false;
6543 }
6544 
6545 #endif
6546 static void remove_stream(struct amdgpu_device *adev,
6547 			  struct amdgpu_crtc *acrtc,
6548 			  struct dc_stream_state *stream)
6549 {
6550 	/* this is the update mode case */
6551 
6552 	acrtc->otg_inst = -1;
6553 	acrtc->enabled = false;
6554 }
6555 
6556 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6557 			       struct dc_cursor_position *position)
6558 {
6559 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6560 	int x, y;
6561 	int xorigin = 0, yorigin = 0;
6562 
6563 	position->enable = false;
6564 	position->x = 0;
6565 	position->y = 0;
6566 
6567 	if (!crtc || !plane->state->fb)
6568 		return 0;
6569 
6570 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6571 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6572 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6573 			  __func__,
6574 			  plane->state->crtc_w,
6575 			  plane->state->crtc_h);
6576 		return -EINVAL;
6577 	}
6578 
6579 	x = plane->state->crtc_x;
6580 	y = plane->state->crtc_y;
6581 
6582 	if (x <= -amdgpu_crtc->max_cursor_width ||
6583 	    y <= -amdgpu_crtc->max_cursor_height)
6584 		return 0;
6585 
6586 	if (x < 0) {
6587 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6588 		x = 0;
6589 	}
6590 	if (y < 0) {
6591 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6592 		y = 0;
6593 	}
6594 	position->enable = true;
6595 	position->translate_by_source = true;
6596 	position->x = x;
6597 	position->y = y;
6598 	position->x_hotspot = xorigin;
6599 	position->y_hotspot = yorigin;
6600 
6601 	return 0;
6602 }
6603 
6604 static void handle_cursor_update(struct drm_plane *plane,
6605 				 struct drm_plane_state *old_plane_state)
6606 {
6607 	struct amdgpu_device *adev = plane->dev->dev_private;
6608 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6609 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6610 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6611 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6612 	uint64_t address = afb ? afb->address : 0;
6613 	struct dc_cursor_position position;
6614 	struct dc_cursor_attributes attributes;
6615 	int ret;
6616 
6617 	if (!plane->state->fb && !old_plane_state->fb)
6618 		return;
6619 
6620 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6621 			 __func__,
6622 			 amdgpu_crtc->crtc_id,
6623 			 plane->state->crtc_w,
6624 			 plane->state->crtc_h);
6625 
6626 	ret = get_cursor_position(plane, crtc, &position);
6627 	if (ret)
6628 		return;
6629 
6630 	if (!position.enable) {
6631 		/* turn off cursor */
6632 		if (crtc_state && crtc_state->stream) {
6633 			mutex_lock(&adev->dm.dc_lock);
6634 			dc_stream_set_cursor_position(crtc_state->stream,
6635 						      &position);
6636 			mutex_unlock(&adev->dm.dc_lock);
6637 		}
6638 		return;
6639 	}
6640 
6641 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6642 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6643 
6644 	memset(&attributes, 0, sizeof(attributes));
6645 	attributes.address.high_part = upper_32_bits(address);
6646 	attributes.address.low_part  = lower_32_bits(address);
6647 	attributes.width             = plane->state->crtc_w;
6648 	attributes.height            = plane->state->crtc_h;
6649 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6650 	attributes.rotation_angle    = 0;
6651 	attributes.attribute_flags.value = 0;
6652 
6653 	attributes.pitch = attributes.width;
6654 
6655 	if (crtc_state->stream) {
6656 		mutex_lock(&adev->dm.dc_lock);
6657 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6658 							 &attributes))
6659 			DRM_ERROR("DC failed to set cursor attributes\n");
6660 
6661 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6662 						   &position))
6663 			DRM_ERROR("DC failed to set cursor position\n");
6664 		mutex_unlock(&adev->dm.dc_lock);
6665 	}
6666 }
6667 
6668 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6669 {
6670 
6671 	assert_spin_locked(&acrtc->base.dev->event_lock);
6672 	WARN_ON(acrtc->event);
6673 
6674 	acrtc->event = acrtc->base.state->event;
6675 
6676 	/* Set the flip status */
6677 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6678 
6679 	/* Mark this event as consumed */
6680 	acrtc->base.state->event = NULL;
6681 
6682 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6683 						 acrtc->crtc_id);
6684 }
6685 
6686 static void update_freesync_state_on_stream(
6687 	struct amdgpu_display_manager *dm,
6688 	struct dm_crtc_state *new_crtc_state,
6689 	struct dc_stream_state *new_stream,
6690 	struct dc_plane_state *surface,
6691 	u32 flip_timestamp_in_us)
6692 {
6693 	struct mod_vrr_params vrr_params;
6694 	struct dc_info_packet vrr_infopacket = {0};
6695 	struct amdgpu_device *adev = dm->adev;
6696 	unsigned long flags;
6697 
6698 	if (!new_stream)
6699 		return;
6700 
6701 	/*
6702 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6703 	 * For now it's sufficient to just guard against these conditions.
6704 	 */
6705 
6706 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6707 		return;
6708 
6709 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6710 	vrr_params = new_crtc_state->vrr_params;
6711 
6712 	if (surface) {
6713 		mod_freesync_handle_preflip(
6714 			dm->freesync_module,
6715 			surface,
6716 			new_stream,
6717 			flip_timestamp_in_us,
6718 			&vrr_params);
6719 
6720 		if (adev->family < AMDGPU_FAMILY_AI &&
6721 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6722 			mod_freesync_handle_v_update(dm->freesync_module,
6723 						     new_stream, &vrr_params);
6724 
6725 			/* Need to call this before the frame ends. */
6726 			dc_stream_adjust_vmin_vmax(dm->dc,
6727 						   new_crtc_state->stream,
6728 						   &vrr_params.adjust);
6729 		}
6730 	}
6731 
6732 	mod_freesync_build_vrr_infopacket(
6733 		dm->freesync_module,
6734 		new_stream,
6735 		&vrr_params,
6736 		PACKET_TYPE_VRR,
6737 		TRANSFER_FUNC_UNKNOWN,
6738 		&vrr_infopacket);
6739 
6740 	new_crtc_state->freesync_timing_changed |=
6741 		(memcmp(&new_crtc_state->vrr_params.adjust,
6742 			&vrr_params.adjust,
6743 			sizeof(vrr_params.adjust)) != 0);
6744 
6745 	new_crtc_state->freesync_vrr_info_changed |=
6746 		(memcmp(&new_crtc_state->vrr_infopacket,
6747 			&vrr_infopacket,
6748 			sizeof(vrr_infopacket)) != 0);
6749 
6750 	new_crtc_state->vrr_params = vrr_params;
6751 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6752 
6753 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6754 	new_stream->vrr_infopacket = vrr_infopacket;
6755 
6756 	if (new_crtc_state->freesync_vrr_info_changed)
6757 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6758 			      new_crtc_state->base.crtc->base.id,
6759 			      (int)new_crtc_state->base.vrr_enabled,
6760 			      (int)vrr_params.state);
6761 
6762 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6763 }
6764 
6765 static void pre_update_freesync_state_on_stream(
6766 	struct amdgpu_display_manager *dm,
6767 	struct dm_crtc_state *new_crtc_state)
6768 {
6769 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6770 	struct mod_vrr_params vrr_params;
6771 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6772 	struct amdgpu_device *adev = dm->adev;
6773 	unsigned long flags;
6774 
6775 	if (!new_stream)
6776 		return;
6777 
6778 	/*
6779 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6780 	 * For now it's sufficient to just guard against these conditions.
6781 	 */
6782 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6783 		return;
6784 
6785 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6786 	vrr_params = new_crtc_state->vrr_params;
6787 
6788 	if (new_crtc_state->vrr_supported &&
6789 	    config.min_refresh_in_uhz &&
6790 	    config.max_refresh_in_uhz) {
6791 		config.state = new_crtc_state->base.vrr_enabled ?
6792 			VRR_STATE_ACTIVE_VARIABLE :
6793 			VRR_STATE_INACTIVE;
6794 	} else {
6795 		config.state = VRR_STATE_UNSUPPORTED;
6796 	}
6797 
6798 	mod_freesync_build_vrr_params(dm->freesync_module,
6799 				      new_stream,
6800 				      &config, &vrr_params);
6801 
6802 	new_crtc_state->freesync_timing_changed |=
6803 		(memcmp(&new_crtc_state->vrr_params.adjust,
6804 			&vrr_params.adjust,
6805 			sizeof(vrr_params.adjust)) != 0);
6806 
6807 	new_crtc_state->vrr_params = vrr_params;
6808 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6809 }
6810 
6811 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6812 					    struct dm_crtc_state *new_state)
6813 {
6814 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6815 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6816 
6817 	if (!old_vrr_active && new_vrr_active) {
6818 		/* Transition VRR inactive -> active:
6819 		 * While VRR is active, we must not disable vblank irq, as a
6820 		 * reenable after disable would compute bogus vblank/pflip
6821 		 * timestamps if it likely happened inside display front-porch.
6822 		 *
6823 		 * We also need vupdate irq for the actual core vblank handling
6824 		 * at end of vblank.
6825 		 */
6826 		dm_set_vupdate_irq(new_state->base.crtc, true);
6827 		drm_crtc_vblank_get(new_state->base.crtc);
6828 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6829 				 __func__, new_state->base.crtc->base.id);
6830 	} else if (old_vrr_active && !new_vrr_active) {
6831 		/* Transition VRR active -> inactive:
6832 		 * Allow vblank irq disable again for fixed refresh rate.
6833 		 */
6834 		dm_set_vupdate_irq(new_state->base.crtc, false);
6835 		drm_crtc_vblank_put(new_state->base.crtc);
6836 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6837 				 __func__, new_state->base.crtc->base.id);
6838 	}
6839 }
6840 
6841 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6842 {
6843 	struct drm_plane *plane;
6844 	struct drm_plane_state *old_plane_state, *new_plane_state;
6845 	int i;
6846 
6847 	/*
6848 	 * TODO: Make this per-stream so we don't issue redundant updates for
6849 	 * commits with multiple streams.
6850 	 */
6851 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6852 				       new_plane_state, i)
6853 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6854 			handle_cursor_update(plane, old_plane_state);
6855 }
6856 
6857 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6858 				    struct dc_state *dc_state,
6859 				    struct drm_device *dev,
6860 				    struct amdgpu_display_manager *dm,
6861 				    struct drm_crtc *pcrtc,
6862 				    bool wait_for_vblank)
6863 {
6864 	uint32_t i;
6865 	uint64_t timestamp_ns;
6866 	struct drm_plane *plane;
6867 	struct drm_plane_state *old_plane_state, *new_plane_state;
6868 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6869 	struct drm_crtc_state *new_pcrtc_state =
6870 			drm_atomic_get_new_crtc_state(state, pcrtc);
6871 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6872 	struct dm_crtc_state *dm_old_crtc_state =
6873 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6874 	int planes_count = 0, vpos, hpos;
6875 	long r;
6876 	unsigned long flags;
6877 	struct amdgpu_bo *abo;
6878 	uint64_t tiling_flags;
6879 	bool tmz_surface = false;
6880 	uint32_t target_vblank, last_flip_vblank;
6881 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6882 	bool pflip_present = false;
6883 	struct {
6884 		struct dc_surface_update surface_updates[MAX_SURFACES];
6885 		struct dc_plane_info plane_infos[MAX_SURFACES];
6886 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6887 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6888 		struct dc_stream_update stream_update;
6889 	} *bundle;
6890 
6891 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6892 
6893 	if (!bundle) {
6894 		dm_error("Failed to allocate update bundle\n");
6895 		goto cleanup;
6896 	}
6897 
6898 	/*
6899 	 * Disable the cursor first if we're disabling all the planes.
6900 	 * It'll remain on the screen after the planes are re-enabled
6901 	 * if we don't.
6902 	 */
6903 	if (acrtc_state->active_planes == 0)
6904 		amdgpu_dm_commit_cursors(state);
6905 
6906 	/* update planes when needed */
6907 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6908 		struct drm_crtc *crtc = new_plane_state->crtc;
6909 		struct drm_crtc_state *new_crtc_state;
6910 		struct drm_framebuffer *fb = new_plane_state->fb;
6911 		bool plane_needs_flip;
6912 		struct dc_plane_state *dc_plane;
6913 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6914 
6915 		/* Cursor plane is handled after stream updates */
6916 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6917 			continue;
6918 
6919 		if (!fb || !crtc || pcrtc != crtc)
6920 			continue;
6921 
6922 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6923 		if (!new_crtc_state->active)
6924 			continue;
6925 
6926 		dc_plane = dm_new_plane_state->dc_state;
6927 
6928 		bundle->surface_updates[planes_count].surface = dc_plane;
6929 		if (new_pcrtc_state->color_mgmt_changed) {
6930 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6931 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6932 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6933 		}
6934 
6935 		fill_dc_scaling_info(new_plane_state,
6936 				     &bundle->scaling_infos[planes_count]);
6937 
6938 		bundle->surface_updates[planes_count].scaling_info =
6939 			&bundle->scaling_infos[planes_count];
6940 
6941 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6942 
6943 		pflip_present = pflip_present || plane_needs_flip;
6944 
6945 		if (!plane_needs_flip) {
6946 			planes_count += 1;
6947 			continue;
6948 		}
6949 
6950 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6951 
6952 		/*
6953 		 * Wait for all fences on this FB. Do limited wait to avoid
6954 		 * deadlock during GPU reset when this fence will not signal
6955 		 * but we hold reservation lock for the BO.
6956 		 */
6957 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6958 							false,
6959 							msecs_to_jiffies(5000));
6960 		if (unlikely(r <= 0))
6961 			DRM_ERROR("Waiting for fences timed out!");
6962 
6963 		/*
6964 		 * TODO This might fail and hence better not used, wait
6965 		 * explicitly on fences instead
6966 		 * and in general should be called for
6967 		 * blocking commit to as per framework helpers
6968 		 */
6969 		r = amdgpu_bo_reserve(abo, true);
6970 		if (unlikely(r != 0))
6971 			DRM_ERROR("failed to reserve buffer before flip\n");
6972 
6973 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6974 
6975 		tmz_surface = amdgpu_bo_encrypted(abo);
6976 
6977 		amdgpu_bo_unreserve(abo);
6978 
6979 		fill_dc_plane_info_and_addr(
6980 			dm->adev, new_plane_state, tiling_flags,
6981 			&bundle->plane_infos[planes_count],
6982 			&bundle->flip_addrs[planes_count].address,
6983 			tmz_surface,
6984 			false);
6985 
6986 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6987 				 new_plane_state->plane->index,
6988 				 bundle->plane_infos[planes_count].dcc.enable);
6989 
6990 		bundle->surface_updates[planes_count].plane_info =
6991 			&bundle->plane_infos[planes_count];
6992 
6993 		/*
6994 		 * Only allow immediate flips for fast updates that don't
6995 		 * change FB pitch, DCC state, rotation or mirroing.
6996 		 */
6997 		bundle->flip_addrs[planes_count].flip_immediate =
6998 			crtc->state->async_flip &&
6999 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7000 
7001 		timestamp_ns = ktime_get_ns();
7002 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7003 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7004 		bundle->surface_updates[planes_count].surface = dc_plane;
7005 
7006 		if (!bundle->surface_updates[planes_count].surface) {
7007 			DRM_ERROR("No surface for CRTC: id=%d\n",
7008 					acrtc_attach->crtc_id);
7009 			continue;
7010 		}
7011 
7012 		if (plane == pcrtc->primary)
7013 			update_freesync_state_on_stream(
7014 				dm,
7015 				acrtc_state,
7016 				acrtc_state->stream,
7017 				dc_plane,
7018 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7019 
7020 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7021 				 __func__,
7022 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7023 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7024 
7025 		planes_count += 1;
7026 
7027 	}
7028 
7029 	if (pflip_present) {
7030 		if (!vrr_active) {
7031 			/* Use old throttling in non-vrr fixed refresh rate mode
7032 			 * to keep flip scheduling based on target vblank counts
7033 			 * working in a backwards compatible way, e.g., for
7034 			 * clients using the GLX_OML_sync_control extension or
7035 			 * DRI3/Present extension with defined target_msc.
7036 			 */
7037 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7038 		}
7039 		else {
7040 			/* For variable refresh rate mode only:
7041 			 * Get vblank of last completed flip to avoid > 1 vrr
7042 			 * flips per video frame by use of throttling, but allow
7043 			 * flip programming anywhere in the possibly large
7044 			 * variable vrr vblank interval for fine-grained flip
7045 			 * timing control and more opportunity to avoid stutter
7046 			 * on late submission of flips.
7047 			 */
7048 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7049 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7050 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7051 		}
7052 
7053 		target_vblank = last_flip_vblank + wait_for_vblank;
7054 
7055 		/*
7056 		 * Wait until we're out of the vertical blank period before the one
7057 		 * targeted by the flip
7058 		 */
7059 		while ((acrtc_attach->enabled &&
7060 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7061 							    0, &vpos, &hpos, NULL,
7062 							    NULL, &pcrtc->hwmode)
7063 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7064 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7065 			(int)(target_vblank -
7066 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7067 			usleep_range(1000, 1100);
7068 		}
7069 
7070 		/**
7071 		 * Prepare the flip event for the pageflip interrupt to handle.
7072 		 *
7073 		 * This only works in the case where we've already turned on the
7074 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7075 		 * from 0 -> n planes we have to skip a hardware generated event
7076 		 * and rely on sending it from software.
7077 		 */
7078 		if (acrtc_attach->base.state->event &&
7079 		    acrtc_state->active_planes > 0) {
7080 			drm_crtc_vblank_get(pcrtc);
7081 
7082 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7083 
7084 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7085 			prepare_flip_isr(acrtc_attach);
7086 
7087 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7088 		}
7089 
7090 		if (acrtc_state->stream) {
7091 			if (acrtc_state->freesync_vrr_info_changed)
7092 				bundle->stream_update.vrr_infopacket =
7093 					&acrtc_state->stream->vrr_infopacket;
7094 		}
7095 	}
7096 
7097 	/* Update the planes if changed or disable if we don't have any. */
7098 	if ((planes_count || acrtc_state->active_planes == 0) &&
7099 		acrtc_state->stream) {
7100 		bundle->stream_update.stream = acrtc_state->stream;
7101 		if (new_pcrtc_state->mode_changed) {
7102 			bundle->stream_update.src = acrtc_state->stream->src;
7103 			bundle->stream_update.dst = acrtc_state->stream->dst;
7104 		}
7105 
7106 		if (new_pcrtc_state->color_mgmt_changed) {
7107 			/*
7108 			 * TODO: This isn't fully correct since we've actually
7109 			 * already modified the stream in place.
7110 			 */
7111 			bundle->stream_update.gamut_remap =
7112 				&acrtc_state->stream->gamut_remap_matrix;
7113 			bundle->stream_update.output_csc_transform =
7114 				&acrtc_state->stream->csc_color_matrix;
7115 			bundle->stream_update.out_transfer_func =
7116 				acrtc_state->stream->out_transfer_func;
7117 		}
7118 
7119 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7120 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7121 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7122 
7123 		/*
7124 		 * If FreeSync state on the stream has changed then we need to
7125 		 * re-adjust the min/max bounds now that DC doesn't handle this
7126 		 * as part of commit.
7127 		 */
7128 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7129 		    amdgpu_dm_vrr_active(acrtc_state)) {
7130 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7131 			dc_stream_adjust_vmin_vmax(
7132 				dm->dc, acrtc_state->stream,
7133 				&acrtc_state->vrr_params.adjust);
7134 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7135 		}
7136 		mutex_lock(&dm->dc_lock);
7137 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7138 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7139 			amdgpu_dm_psr_disable(acrtc_state->stream);
7140 
7141 		dc_commit_updates_for_stream(dm->dc,
7142 						     bundle->surface_updates,
7143 						     planes_count,
7144 						     acrtc_state->stream,
7145 						     &bundle->stream_update,
7146 						     dc_state);
7147 
7148 		/**
7149 		 * Enable or disable the interrupts on the backend.
7150 		 *
7151 		 * Most pipes are put into power gating when unused.
7152 		 *
7153 		 * When power gating is enabled on a pipe we lose the
7154 		 * interrupt enablement state when power gating is disabled.
7155 		 *
7156 		 * So we need to update the IRQ control state in hardware
7157 		 * whenever the pipe turns on (since it could be previously
7158 		 * power gated) or off (since some pipes can't be power gated
7159 		 * on some ASICs).
7160 		 */
7161 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7162 			dm_update_pflip_irq_state(
7163 				(struct amdgpu_device *)dev->dev_private,
7164 				acrtc_attach);
7165 
7166 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7167 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7168 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7169 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7170 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7171 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7172 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7173 			amdgpu_dm_psr_enable(acrtc_state->stream);
7174 		}
7175 
7176 		mutex_unlock(&dm->dc_lock);
7177 	}
7178 
7179 	/*
7180 	 * Update cursor state *after* programming all the planes.
7181 	 * This avoids redundant programming in the case where we're going
7182 	 * to be disabling a single plane - those pipes are being disabled.
7183 	 */
7184 	if (acrtc_state->active_planes)
7185 		amdgpu_dm_commit_cursors(state);
7186 
7187 cleanup:
7188 	kfree(bundle);
7189 }
7190 
7191 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7192 				   struct drm_atomic_state *state)
7193 {
7194 	struct amdgpu_device *adev = dev->dev_private;
7195 	struct amdgpu_dm_connector *aconnector;
7196 	struct drm_connector *connector;
7197 	struct drm_connector_state *old_con_state, *new_con_state;
7198 	struct drm_crtc_state *new_crtc_state;
7199 	struct dm_crtc_state *new_dm_crtc_state;
7200 	const struct dc_stream_status *status;
7201 	int i, inst;
7202 
7203 	/* Notify device removals. */
7204 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7205 		if (old_con_state->crtc != new_con_state->crtc) {
7206 			/* CRTC changes require notification. */
7207 			goto notify;
7208 		}
7209 
7210 		if (!new_con_state->crtc)
7211 			continue;
7212 
7213 		new_crtc_state = drm_atomic_get_new_crtc_state(
7214 			state, new_con_state->crtc);
7215 
7216 		if (!new_crtc_state)
7217 			continue;
7218 
7219 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7220 			continue;
7221 
7222 	notify:
7223 		aconnector = to_amdgpu_dm_connector(connector);
7224 
7225 		mutex_lock(&adev->dm.audio_lock);
7226 		inst = aconnector->audio_inst;
7227 		aconnector->audio_inst = -1;
7228 		mutex_unlock(&adev->dm.audio_lock);
7229 
7230 		amdgpu_dm_audio_eld_notify(adev, inst);
7231 	}
7232 
7233 	/* Notify audio device additions. */
7234 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7235 		if (!new_con_state->crtc)
7236 			continue;
7237 
7238 		new_crtc_state = drm_atomic_get_new_crtc_state(
7239 			state, new_con_state->crtc);
7240 
7241 		if (!new_crtc_state)
7242 			continue;
7243 
7244 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7245 			continue;
7246 
7247 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7248 		if (!new_dm_crtc_state->stream)
7249 			continue;
7250 
7251 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7252 		if (!status)
7253 			continue;
7254 
7255 		aconnector = to_amdgpu_dm_connector(connector);
7256 
7257 		mutex_lock(&adev->dm.audio_lock);
7258 		inst = status->audio_inst;
7259 		aconnector->audio_inst = inst;
7260 		mutex_unlock(&adev->dm.audio_lock);
7261 
7262 		amdgpu_dm_audio_eld_notify(adev, inst);
7263 	}
7264 }
7265 
7266 /*
7267  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7268  * @crtc_state: the DRM CRTC state
7269  * @stream_state: the DC stream state.
7270  *
7271  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7272  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7273  */
7274 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7275 						struct dc_stream_state *stream_state)
7276 {
7277 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7278 }
7279 
7280 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7281 				   struct drm_atomic_state *state,
7282 				   bool nonblock)
7283 {
7284 	struct drm_crtc *crtc;
7285 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7286 	struct amdgpu_device *adev = dev->dev_private;
7287 	int i;
7288 
7289 	/*
7290 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7291 	 * a modeset, being disabled, or have no active planes.
7292 	 *
7293 	 * It's done in atomic commit rather than commit tail for now since
7294 	 * some of these interrupt handlers access the current CRTC state and
7295 	 * potentially the stream pointer itself.
7296 	 *
7297 	 * Since the atomic state is swapped within atomic commit and not within
7298 	 * commit tail this would leave to new state (that hasn't been committed yet)
7299 	 * being accesssed from within the handlers.
7300 	 *
7301 	 * TODO: Fix this so we can do this in commit tail and not have to block
7302 	 * in atomic check.
7303 	 */
7304 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7305 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7306 
7307 		if (old_crtc_state->active &&
7308 		    (!new_crtc_state->active ||
7309 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7310 			manage_dm_interrupts(adev, acrtc, false);
7311 	}
7312 	/*
7313 	 * Add check here for SoC's that support hardware cursor plane, to
7314 	 * unset legacy_cursor_update
7315 	 */
7316 
7317 	return drm_atomic_helper_commit(dev, state, nonblock);
7318 
7319 	/*TODO Handle EINTR, reenable IRQ*/
7320 }
7321 
7322 /**
7323  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7324  * @state: The atomic state to commit
7325  *
7326  * This will tell DC to commit the constructed DC state from atomic_check,
7327  * programming the hardware. Any failures here implies a hardware failure, since
7328  * atomic check should have filtered anything non-kosher.
7329  */
7330 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7331 {
7332 	struct drm_device *dev = state->dev;
7333 	struct amdgpu_device *adev = dev->dev_private;
7334 	struct amdgpu_display_manager *dm = &adev->dm;
7335 	struct dm_atomic_state *dm_state;
7336 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7337 	uint32_t i, j;
7338 	struct drm_crtc *crtc;
7339 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7340 	unsigned long flags;
7341 	bool wait_for_vblank = true;
7342 	struct drm_connector *connector;
7343 	struct drm_connector_state *old_con_state, *new_con_state;
7344 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7345 	int crtc_disable_count = 0;
7346 
7347 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7348 
7349 	dm_state = dm_atomic_get_new_state(state);
7350 	if (dm_state && dm_state->context) {
7351 		dc_state = dm_state->context;
7352 	} else {
7353 		/* No state changes, retain current state. */
7354 		dc_state_temp = dc_create_state(dm->dc);
7355 		ASSERT(dc_state_temp);
7356 		dc_state = dc_state_temp;
7357 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7358 	}
7359 
7360 	/* update changed items */
7361 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7362 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7363 
7364 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7365 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7366 
7367 		DRM_DEBUG_DRIVER(
7368 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7369 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7370 			"connectors_changed:%d\n",
7371 			acrtc->crtc_id,
7372 			new_crtc_state->enable,
7373 			new_crtc_state->active,
7374 			new_crtc_state->planes_changed,
7375 			new_crtc_state->mode_changed,
7376 			new_crtc_state->active_changed,
7377 			new_crtc_state->connectors_changed);
7378 
7379 		/* Copy all transient state flags into dc state */
7380 		if (dm_new_crtc_state->stream) {
7381 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7382 							    dm_new_crtc_state->stream);
7383 		}
7384 
7385 		/* handles headless hotplug case, updating new_state and
7386 		 * aconnector as needed
7387 		 */
7388 
7389 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7390 
7391 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7392 
7393 			if (!dm_new_crtc_state->stream) {
7394 				/*
7395 				 * this could happen because of issues with
7396 				 * userspace notifications delivery.
7397 				 * In this case userspace tries to set mode on
7398 				 * display which is disconnected in fact.
7399 				 * dc_sink is NULL in this case on aconnector.
7400 				 * We expect reset mode will come soon.
7401 				 *
7402 				 * This can also happen when unplug is done
7403 				 * during resume sequence ended
7404 				 *
7405 				 * In this case, we want to pretend we still
7406 				 * have a sink to keep the pipe running so that
7407 				 * hw state is consistent with the sw state
7408 				 */
7409 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7410 						__func__, acrtc->base.base.id);
7411 				continue;
7412 			}
7413 
7414 			if (dm_old_crtc_state->stream)
7415 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7416 
7417 			pm_runtime_get_noresume(dev->dev);
7418 
7419 			acrtc->enabled = true;
7420 			acrtc->hw_mode = new_crtc_state->mode;
7421 			crtc->hwmode = new_crtc_state->mode;
7422 		} else if (modereset_required(new_crtc_state)) {
7423 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7424 			/* i.e. reset mode */
7425 			if (dm_old_crtc_state->stream) {
7426 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7427 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7428 
7429 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7430 			}
7431 		}
7432 	} /* for_each_crtc_in_state() */
7433 
7434 	if (dc_state) {
7435 		dm_enable_per_frame_crtc_master_sync(dc_state);
7436 		mutex_lock(&dm->dc_lock);
7437 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7438 		mutex_unlock(&dm->dc_lock);
7439 	}
7440 
7441 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7442 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7443 
7444 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7445 
7446 		if (dm_new_crtc_state->stream != NULL) {
7447 			const struct dc_stream_status *status =
7448 					dc_stream_get_status(dm_new_crtc_state->stream);
7449 
7450 			if (!status)
7451 				status = dc_stream_get_status_from_state(dc_state,
7452 									 dm_new_crtc_state->stream);
7453 
7454 			if (!status)
7455 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7456 			else
7457 				acrtc->otg_inst = status->primary_otg_inst;
7458 		}
7459 	}
7460 #ifdef CONFIG_DRM_AMD_DC_HDCP
7461 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7462 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7463 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7464 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7465 
7466 		new_crtc_state = NULL;
7467 
7468 		if (acrtc)
7469 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7470 
7471 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7472 
7473 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7474 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7475 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7476 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7477 			continue;
7478 		}
7479 
7480 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7481 			hdcp_update_display(
7482 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7483 				new_con_state->hdcp_content_type,
7484 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7485 													 : false);
7486 	}
7487 #endif
7488 
7489 	/* Handle connector state changes */
7490 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7491 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7492 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7493 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7494 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7495 		struct dc_stream_update stream_update;
7496 		struct dc_info_packet hdr_packet;
7497 		struct dc_stream_status *status = NULL;
7498 		bool abm_changed, hdr_changed, scaling_changed;
7499 
7500 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7501 		memset(&stream_update, 0, sizeof(stream_update));
7502 
7503 		if (acrtc) {
7504 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7505 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7506 		}
7507 
7508 		/* Skip any modesets/resets */
7509 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7510 			continue;
7511 
7512 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7513 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7514 
7515 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7516 							     dm_old_con_state);
7517 
7518 		abm_changed = dm_new_crtc_state->abm_level !=
7519 			      dm_old_crtc_state->abm_level;
7520 
7521 		hdr_changed =
7522 			is_hdr_metadata_different(old_con_state, new_con_state);
7523 
7524 		if (!scaling_changed && !abm_changed && !hdr_changed)
7525 			continue;
7526 
7527 		stream_update.stream = dm_new_crtc_state->stream;
7528 		if (scaling_changed) {
7529 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7530 					dm_new_con_state, dm_new_crtc_state->stream);
7531 
7532 			stream_update.src = dm_new_crtc_state->stream->src;
7533 			stream_update.dst = dm_new_crtc_state->stream->dst;
7534 		}
7535 
7536 		if (abm_changed) {
7537 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7538 
7539 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7540 		}
7541 
7542 		if (hdr_changed) {
7543 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7544 			stream_update.hdr_static_metadata = &hdr_packet;
7545 		}
7546 
7547 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7548 		WARN_ON(!status);
7549 		WARN_ON(!status->plane_count);
7550 
7551 		/*
7552 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7553 		 * Here we create an empty update on each plane.
7554 		 * To fix this, DC should permit updating only stream properties.
7555 		 */
7556 		for (j = 0; j < status->plane_count; j++)
7557 			dummy_updates[j].surface = status->plane_states[0];
7558 
7559 
7560 		mutex_lock(&dm->dc_lock);
7561 		dc_commit_updates_for_stream(dm->dc,
7562 						     dummy_updates,
7563 						     status->plane_count,
7564 						     dm_new_crtc_state->stream,
7565 						     &stream_update,
7566 						     dc_state);
7567 		mutex_unlock(&dm->dc_lock);
7568 	}
7569 
7570 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7571 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7572 				      new_crtc_state, i) {
7573 		if (old_crtc_state->active && !new_crtc_state->active)
7574 			crtc_disable_count++;
7575 
7576 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7577 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7578 
7579 		/* Update freesync active state. */
7580 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7581 
7582 		/* Handle vrr on->off / off->on transitions */
7583 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7584 						dm_new_crtc_state);
7585 	}
7586 
7587 	/**
7588 	 * Enable interrupts for CRTCs that are newly enabled or went through
7589 	 * a modeset. It was intentionally deferred until after the front end
7590 	 * state was modified to wait until the OTG was on and so the IRQ
7591 	 * handlers didn't access stale or invalid state.
7592 	 */
7593 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7594 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7595 
7596 		if (new_crtc_state->active &&
7597 		    (!old_crtc_state->active ||
7598 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7599 			manage_dm_interrupts(adev, acrtc, true);
7600 #ifdef CONFIG_DEBUG_FS
7601 			/**
7602 			 * Frontend may have changed so reapply the CRC capture
7603 			 * settings for the stream.
7604 			 */
7605 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7606 
7607 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7608 				amdgpu_dm_crtc_configure_crc_source(
7609 					crtc, dm_new_crtc_state,
7610 					dm_new_crtc_state->crc_src);
7611 			}
7612 #endif
7613 		}
7614 	}
7615 
7616 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7617 		if (new_crtc_state->async_flip)
7618 			wait_for_vblank = false;
7619 
7620 	/* update planes when needed per crtc*/
7621 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7622 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7623 
7624 		if (dm_new_crtc_state->stream)
7625 			amdgpu_dm_commit_planes(state, dc_state, dev,
7626 						dm, crtc, wait_for_vblank);
7627 	}
7628 
7629 	/* Update audio instances for each connector. */
7630 	amdgpu_dm_commit_audio(dev, state);
7631 
7632 	/*
7633 	 * send vblank event on all events not handled in flip and
7634 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7635 	 */
7636 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7637 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7638 
7639 		if (new_crtc_state->event)
7640 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7641 
7642 		new_crtc_state->event = NULL;
7643 	}
7644 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7645 
7646 	/* Signal HW programming completion */
7647 	drm_atomic_helper_commit_hw_done(state);
7648 
7649 	if (wait_for_vblank)
7650 		drm_atomic_helper_wait_for_flip_done(dev, state);
7651 
7652 	drm_atomic_helper_cleanup_planes(dev, state);
7653 
7654 	/*
7655 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7656 	 * so we can put the GPU into runtime suspend if we're not driving any
7657 	 * displays anymore
7658 	 */
7659 	for (i = 0; i < crtc_disable_count; i++)
7660 		pm_runtime_put_autosuspend(dev->dev);
7661 	pm_runtime_mark_last_busy(dev->dev);
7662 
7663 	if (dc_state_temp)
7664 		dc_release_state(dc_state_temp);
7665 }
7666 
7667 
7668 static int dm_force_atomic_commit(struct drm_connector *connector)
7669 {
7670 	int ret = 0;
7671 	struct drm_device *ddev = connector->dev;
7672 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7673 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7674 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7675 	struct drm_connector_state *conn_state;
7676 	struct drm_crtc_state *crtc_state;
7677 	struct drm_plane_state *plane_state;
7678 
7679 	if (!state)
7680 		return -ENOMEM;
7681 
7682 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7683 
7684 	/* Construct an atomic state to restore previous display setting */
7685 
7686 	/*
7687 	 * Attach connectors to drm_atomic_state
7688 	 */
7689 	conn_state = drm_atomic_get_connector_state(state, connector);
7690 
7691 	ret = PTR_ERR_OR_ZERO(conn_state);
7692 	if (ret)
7693 		goto err;
7694 
7695 	/* Attach crtc to drm_atomic_state*/
7696 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7697 
7698 	ret = PTR_ERR_OR_ZERO(crtc_state);
7699 	if (ret)
7700 		goto err;
7701 
7702 	/* force a restore */
7703 	crtc_state->mode_changed = true;
7704 
7705 	/* Attach plane to drm_atomic_state */
7706 	plane_state = drm_atomic_get_plane_state(state, plane);
7707 
7708 	ret = PTR_ERR_OR_ZERO(plane_state);
7709 	if (ret)
7710 		goto err;
7711 
7712 
7713 	/* Call commit internally with the state we just constructed */
7714 	ret = drm_atomic_commit(state);
7715 	if (!ret)
7716 		return 0;
7717 
7718 err:
7719 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7720 	drm_atomic_state_put(state);
7721 
7722 	return ret;
7723 }
7724 
7725 /*
7726  * This function handles all cases when set mode does not come upon hotplug.
7727  * This includes when a display is unplugged then plugged back into the
7728  * same port and when running without usermode desktop manager supprot
7729  */
7730 void dm_restore_drm_connector_state(struct drm_device *dev,
7731 				    struct drm_connector *connector)
7732 {
7733 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7734 	struct amdgpu_crtc *disconnected_acrtc;
7735 	struct dm_crtc_state *acrtc_state;
7736 
7737 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7738 		return;
7739 
7740 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7741 	if (!disconnected_acrtc)
7742 		return;
7743 
7744 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7745 	if (!acrtc_state->stream)
7746 		return;
7747 
7748 	/*
7749 	 * If the previous sink is not released and different from the current,
7750 	 * we deduce we are in a state where we can not rely on usermode call
7751 	 * to turn on the display, so we do it here
7752 	 */
7753 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7754 		dm_force_atomic_commit(&aconnector->base);
7755 }
7756 
7757 /*
7758  * Grabs all modesetting locks to serialize against any blocking commits,
7759  * Waits for completion of all non blocking commits.
7760  */
7761 static int do_aquire_global_lock(struct drm_device *dev,
7762 				 struct drm_atomic_state *state)
7763 {
7764 	struct drm_crtc *crtc;
7765 	struct drm_crtc_commit *commit;
7766 	long ret;
7767 
7768 	/*
7769 	 * Adding all modeset locks to aquire_ctx will
7770 	 * ensure that when the framework release it the
7771 	 * extra locks we are locking here will get released to
7772 	 */
7773 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7774 	if (ret)
7775 		return ret;
7776 
7777 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7778 		spin_lock(&crtc->commit_lock);
7779 		commit = list_first_entry_or_null(&crtc->commit_list,
7780 				struct drm_crtc_commit, commit_entry);
7781 		if (commit)
7782 			drm_crtc_commit_get(commit);
7783 		spin_unlock(&crtc->commit_lock);
7784 
7785 		if (!commit)
7786 			continue;
7787 
7788 		/*
7789 		 * Make sure all pending HW programming completed and
7790 		 * page flips done
7791 		 */
7792 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7793 
7794 		if (ret > 0)
7795 			ret = wait_for_completion_interruptible_timeout(
7796 					&commit->flip_done, 10*HZ);
7797 
7798 		if (ret == 0)
7799 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7800 				  "timed out\n", crtc->base.id, crtc->name);
7801 
7802 		drm_crtc_commit_put(commit);
7803 	}
7804 
7805 	return ret < 0 ? ret : 0;
7806 }
7807 
7808 static void get_freesync_config_for_crtc(
7809 	struct dm_crtc_state *new_crtc_state,
7810 	struct dm_connector_state *new_con_state)
7811 {
7812 	struct mod_freesync_config config = {0};
7813 	struct amdgpu_dm_connector *aconnector =
7814 			to_amdgpu_dm_connector(new_con_state->base.connector);
7815 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7816 	int vrefresh = drm_mode_vrefresh(mode);
7817 
7818 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7819 					vrefresh >= aconnector->min_vfreq &&
7820 					vrefresh <= aconnector->max_vfreq;
7821 
7822 	if (new_crtc_state->vrr_supported) {
7823 		new_crtc_state->stream->ignore_msa_timing_param = true;
7824 		config.state = new_crtc_state->base.vrr_enabled ?
7825 				VRR_STATE_ACTIVE_VARIABLE :
7826 				VRR_STATE_INACTIVE;
7827 		config.min_refresh_in_uhz =
7828 				aconnector->min_vfreq * 1000000;
7829 		config.max_refresh_in_uhz =
7830 				aconnector->max_vfreq * 1000000;
7831 		config.vsif_supported = true;
7832 		config.btr = true;
7833 	}
7834 
7835 	new_crtc_state->freesync_config = config;
7836 }
7837 
7838 static void reset_freesync_config_for_crtc(
7839 	struct dm_crtc_state *new_crtc_state)
7840 {
7841 	new_crtc_state->vrr_supported = false;
7842 
7843 	memset(&new_crtc_state->vrr_params, 0,
7844 	       sizeof(new_crtc_state->vrr_params));
7845 	memset(&new_crtc_state->vrr_infopacket, 0,
7846 	       sizeof(new_crtc_state->vrr_infopacket));
7847 }
7848 
7849 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7850 				struct drm_atomic_state *state,
7851 				struct drm_crtc *crtc,
7852 				struct drm_crtc_state *old_crtc_state,
7853 				struct drm_crtc_state *new_crtc_state,
7854 				bool enable,
7855 				bool *lock_and_validation_needed)
7856 {
7857 	struct dm_atomic_state *dm_state = NULL;
7858 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7859 	struct dc_stream_state *new_stream;
7860 	int ret = 0;
7861 
7862 	/*
7863 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7864 	 * update changed items
7865 	 */
7866 	struct amdgpu_crtc *acrtc = NULL;
7867 	struct amdgpu_dm_connector *aconnector = NULL;
7868 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7869 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7870 
7871 	new_stream = NULL;
7872 
7873 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7874 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7875 	acrtc = to_amdgpu_crtc(crtc);
7876 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7877 
7878 	/* TODO This hack should go away */
7879 	if (aconnector && enable) {
7880 		/* Make sure fake sink is created in plug-in scenario */
7881 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7882 							    &aconnector->base);
7883 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7884 							    &aconnector->base);
7885 
7886 		if (IS_ERR(drm_new_conn_state)) {
7887 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7888 			goto fail;
7889 		}
7890 
7891 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7892 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7893 
7894 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7895 			goto skip_modeset;
7896 
7897 		new_stream = create_validate_stream_for_sink(aconnector,
7898 							     &new_crtc_state->mode,
7899 							     dm_new_conn_state,
7900 							     dm_old_crtc_state->stream);
7901 
7902 		/*
7903 		 * we can have no stream on ACTION_SET if a display
7904 		 * was disconnected during S3, in this case it is not an
7905 		 * error, the OS will be updated after detection, and
7906 		 * will do the right thing on next atomic commit
7907 		 */
7908 
7909 		if (!new_stream) {
7910 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7911 					__func__, acrtc->base.base.id);
7912 			ret = -ENOMEM;
7913 			goto fail;
7914 		}
7915 
7916 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7917 
7918 		ret = fill_hdr_info_packet(drm_new_conn_state,
7919 					   &new_stream->hdr_static_metadata);
7920 		if (ret)
7921 			goto fail;
7922 
7923 		/*
7924 		 * If we already removed the old stream from the context
7925 		 * (and set the new stream to NULL) then we can't reuse
7926 		 * the old stream even if the stream and scaling are unchanged.
7927 		 * We'll hit the BUG_ON and black screen.
7928 		 *
7929 		 * TODO: Refactor this function to allow this check to work
7930 		 * in all conditions.
7931 		 */
7932 		if (dm_new_crtc_state->stream &&
7933 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7934 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7935 			new_crtc_state->mode_changed = false;
7936 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7937 					 new_crtc_state->mode_changed);
7938 		}
7939 	}
7940 
7941 	/* mode_changed flag may get updated above, need to check again */
7942 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7943 		goto skip_modeset;
7944 
7945 	DRM_DEBUG_DRIVER(
7946 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7947 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7948 		"connectors_changed:%d\n",
7949 		acrtc->crtc_id,
7950 		new_crtc_state->enable,
7951 		new_crtc_state->active,
7952 		new_crtc_state->planes_changed,
7953 		new_crtc_state->mode_changed,
7954 		new_crtc_state->active_changed,
7955 		new_crtc_state->connectors_changed);
7956 
7957 	/* Remove stream for any changed/disabled CRTC */
7958 	if (!enable) {
7959 
7960 		if (!dm_old_crtc_state->stream)
7961 			goto skip_modeset;
7962 
7963 		ret = dm_atomic_get_state(state, &dm_state);
7964 		if (ret)
7965 			goto fail;
7966 
7967 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7968 				crtc->base.id);
7969 
7970 		/* i.e. reset mode */
7971 		if (dc_remove_stream_from_ctx(
7972 				dm->dc,
7973 				dm_state->context,
7974 				dm_old_crtc_state->stream) != DC_OK) {
7975 			ret = -EINVAL;
7976 			goto fail;
7977 		}
7978 
7979 		dc_stream_release(dm_old_crtc_state->stream);
7980 		dm_new_crtc_state->stream = NULL;
7981 
7982 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7983 
7984 		*lock_and_validation_needed = true;
7985 
7986 	} else {/* Add stream for any updated/enabled CRTC */
7987 		/*
7988 		 * Quick fix to prevent NULL pointer on new_stream when
7989 		 * added MST connectors not found in existing crtc_state in the chained mode
7990 		 * TODO: need to dig out the root cause of that
7991 		 */
7992 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7993 			goto skip_modeset;
7994 
7995 		if (modereset_required(new_crtc_state))
7996 			goto skip_modeset;
7997 
7998 		if (modeset_required(new_crtc_state, new_stream,
7999 				     dm_old_crtc_state->stream)) {
8000 
8001 			WARN_ON(dm_new_crtc_state->stream);
8002 
8003 			ret = dm_atomic_get_state(state, &dm_state);
8004 			if (ret)
8005 				goto fail;
8006 
8007 			dm_new_crtc_state->stream = new_stream;
8008 
8009 			dc_stream_retain(new_stream);
8010 
8011 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8012 						crtc->base.id);
8013 
8014 			if (dc_add_stream_to_ctx(
8015 					dm->dc,
8016 					dm_state->context,
8017 					dm_new_crtc_state->stream) != DC_OK) {
8018 				ret = -EINVAL;
8019 				goto fail;
8020 			}
8021 
8022 			*lock_and_validation_needed = true;
8023 		}
8024 	}
8025 
8026 skip_modeset:
8027 	/* Release extra reference */
8028 	if (new_stream)
8029 		 dc_stream_release(new_stream);
8030 
8031 	/*
8032 	 * We want to do dc stream updates that do not require a
8033 	 * full modeset below.
8034 	 */
8035 	if (!(enable && aconnector && new_crtc_state->enable &&
8036 	      new_crtc_state->active))
8037 		return 0;
8038 	/*
8039 	 * Given above conditions, the dc state cannot be NULL because:
8040 	 * 1. We're in the process of enabling CRTCs (just been added
8041 	 *    to the dc context, or already is on the context)
8042 	 * 2. Has a valid connector attached, and
8043 	 * 3. Is currently active and enabled.
8044 	 * => The dc stream state currently exists.
8045 	 */
8046 	BUG_ON(dm_new_crtc_state->stream == NULL);
8047 
8048 	/* Scaling or underscan settings */
8049 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8050 		update_stream_scaling_settings(
8051 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8052 
8053 	/* ABM settings */
8054 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8055 
8056 	/*
8057 	 * Color management settings. We also update color properties
8058 	 * when a modeset is needed, to ensure it gets reprogrammed.
8059 	 */
8060 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8061 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8062 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8063 		if (ret)
8064 			goto fail;
8065 	}
8066 
8067 	/* Update Freesync settings. */
8068 	get_freesync_config_for_crtc(dm_new_crtc_state,
8069 				     dm_new_conn_state);
8070 
8071 	return ret;
8072 
8073 fail:
8074 	if (new_stream)
8075 		dc_stream_release(new_stream);
8076 	return ret;
8077 }
8078 
8079 static bool should_reset_plane(struct drm_atomic_state *state,
8080 			       struct drm_plane *plane,
8081 			       struct drm_plane_state *old_plane_state,
8082 			       struct drm_plane_state *new_plane_state)
8083 {
8084 	struct drm_plane *other;
8085 	struct drm_plane_state *old_other_state, *new_other_state;
8086 	struct drm_crtc_state *new_crtc_state;
8087 	int i;
8088 
8089 	/*
8090 	 * TODO: Remove this hack once the checks below are sufficient
8091 	 * enough to determine when we need to reset all the planes on
8092 	 * the stream.
8093 	 */
8094 	if (state->allow_modeset)
8095 		return true;
8096 
8097 	/* Exit early if we know that we're adding or removing the plane. */
8098 	if (old_plane_state->crtc != new_plane_state->crtc)
8099 		return true;
8100 
8101 	/* old crtc == new_crtc == NULL, plane not in context. */
8102 	if (!new_plane_state->crtc)
8103 		return false;
8104 
8105 	new_crtc_state =
8106 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8107 
8108 	if (!new_crtc_state)
8109 		return true;
8110 
8111 	/* CRTC Degamma changes currently require us to recreate planes. */
8112 	if (new_crtc_state->color_mgmt_changed)
8113 		return true;
8114 
8115 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8116 		return true;
8117 
8118 	/*
8119 	 * If there are any new primary or overlay planes being added or
8120 	 * removed then the z-order can potentially change. To ensure
8121 	 * correct z-order and pipe acquisition the current DC architecture
8122 	 * requires us to remove and recreate all existing planes.
8123 	 *
8124 	 * TODO: Come up with a more elegant solution for this.
8125 	 */
8126 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8127 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8128 			continue;
8129 
8130 		if (old_other_state->crtc != new_plane_state->crtc &&
8131 		    new_other_state->crtc != new_plane_state->crtc)
8132 			continue;
8133 
8134 		if (old_other_state->crtc != new_other_state->crtc)
8135 			return true;
8136 
8137 		/* TODO: Remove this once we can handle fast format changes. */
8138 		if (old_other_state->fb && new_other_state->fb &&
8139 		    old_other_state->fb->format != new_other_state->fb->format)
8140 			return true;
8141 	}
8142 
8143 	return false;
8144 }
8145 
8146 static int dm_update_plane_state(struct dc *dc,
8147 				 struct drm_atomic_state *state,
8148 				 struct drm_plane *plane,
8149 				 struct drm_plane_state *old_plane_state,
8150 				 struct drm_plane_state *new_plane_state,
8151 				 bool enable,
8152 				 bool *lock_and_validation_needed)
8153 {
8154 
8155 	struct dm_atomic_state *dm_state = NULL;
8156 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8157 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8158 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8159 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8160 	struct amdgpu_crtc *new_acrtc;
8161 	bool needs_reset;
8162 	int ret = 0;
8163 
8164 
8165 	new_plane_crtc = new_plane_state->crtc;
8166 	old_plane_crtc = old_plane_state->crtc;
8167 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8168 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8169 
8170 	/*TODO Implement better atomic check for cursor plane */
8171 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8172 		if (!enable || !new_plane_crtc ||
8173 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8174 			return 0;
8175 
8176 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8177 
8178 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8179 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8180 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8181 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8182 			return -EINVAL;
8183 		}
8184 
8185 		return 0;
8186 	}
8187 
8188 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8189 					 new_plane_state);
8190 
8191 	/* Remove any changed/removed planes */
8192 	if (!enable) {
8193 		if (!needs_reset)
8194 			return 0;
8195 
8196 		if (!old_plane_crtc)
8197 			return 0;
8198 
8199 		old_crtc_state = drm_atomic_get_old_crtc_state(
8200 				state, old_plane_crtc);
8201 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8202 
8203 		if (!dm_old_crtc_state->stream)
8204 			return 0;
8205 
8206 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8207 				plane->base.id, old_plane_crtc->base.id);
8208 
8209 		ret = dm_atomic_get_state(state, &dm_state);
8210 		if (ret)
8211 			return ret;
8212 
8213 		if (!dc_remove_plane_from_context(
8214 				dc,
8215 				dm_old_crtc_state->stream,
8216 				dm_old_plane_state->dc_state,
8217 				dm_state->context)) {
8218 
8219 			ret = EINVAL;
8220 			return ret;
8221 		}
8222 
8223 
8224 		dc_plane_state_release(dm_old_plane_state->dc_state);
8225 		dm_new_plane_state->dc_state = NULL;
8226 
8227 		*lock_and_validation_needed = true;
8228 
8229 	} else { /* Add new planes */
8230 		struct dc_plane_state *dc_new_plane_state;
8231 
8232 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8233 			return 0;
8234 
8235 		if (!new_plane_crtc)
8236 			return 0;
8237 
8238 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8239 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8240 
8241 		if (!dm_new_crtc_state->stream)
8242 			return 0;
8243 
8244 		if (!needs_reset)
8245 			return 0;
8246 
8247 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8248 		if (ret)
8249 			return ret;
8250 
8251 		WARN_ON(dm_new_plane_state->dc_state);
8252 
8253 		dc_new_plane_state = dc_create_plane_state(dc);
8254 		if (!dc_new_plane_state)
8255 			return -ENOMEM;
8256 
8257 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8258 				plane->base.id, new_plane_crtc->base.id);
8259 
8260 		ret = fill_dc_plane_attributes(
8261 			new_plane_crtc->dev->dev_private,
8262 			dc_new_plane_state,
8263 			new_plane_state,
8264 			new_crtc_state);
8265 		if (ret) {
8266 			dc_plane_state_release(dc_new_plane_state);
8267 			return ret;
8268 		}
8269 
8270 		ret = dm_atomic_get_state(state, &dm_state);
8271 		if (ret) {
8272 			dc_plane_state_release(dc_new_plane_state);
8273 			return ret;
8274 		}
8275 
8276 		/*
8277 		 * Any atomic check errors that occur after this will
8278 		 * not need a release. The plane state will be attached
8279 		 * to the stream, and therefore part of the atomic
8280 		 * state. It'll be released when the atomic state is
8281 		 * cleaned.
8282 		 */
8283 		if (!dc_add_plane_to_context(
8284 				dc,
8285 				dm_new_crtc_state->stream,
8286 				dc_new_plane_state,
8287 				dm_state->context)) {
8288 
8289 			dc_plane_state_release(dc_new_plane_state);
8290 			return -EINVAL;
8291 		}
8292 
8293 		dm_new_plane_state->dc_state = dc_new_plane_state;
8294 
8295 		/* Tell DC to do a full surface update every time there
8296 		 * is a plane change. Inefficient, but works for now.
8297 		 */
8298 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8299 
8300 		*lock_and_validation_needed = true;
8301 	}
8302 
8303 
8304 	return ret;
8305 }
8306 
8307 static int
8308 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8309 				    struct drm_atomic_state *state,
8310 				    enum surface_update_type *out_type)
8311 {
8312 	struct dc *dc = dm->dc;
8313 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8314 	int i, j, num_plane, ret = 0;
8315 	struct drm_plane_state *old_plane_state, *new_plane_state;
8316 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8317 	struct drm_crtc *new_plane_crtc;
8318 	struct drm_plane *plane;
8319 
8320 	struct drm_crtc *crtc;
8321 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8322 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8323 	struct dc_stream_status *status = NULL;
8324 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8325 	struct surface_info_bundle {
8326 		struct dc_surface_update surface_updates[MAX_SURFACES];
8327 		struct dc_plane_info plane_infos[MAX_SURFACES];
8328 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8329 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8330 		struct dc_stream_update stream_update;
8331 	} *bundle;
8332 
8333 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8334 
8335 	if (!bundle) {
8336 		DRM_ERROR("Failed to allocate update bundle\n");
8337 		/* Set type to FULL to avoid crashing in DC*/
8338 		update_type = UPDATE_TYPE_FULL;
8339 		goto cleanup;
8340 	}
8341 
8342 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8343 
8344 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8345 
8346 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8347 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8348 		num_plane = 0;
8349 
8350 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8351 			update_type = UPDATE_TYPE_FULL;
8352 			goto cleanup;
8353 		}
8354 
8355 		if (!new_dm_crtc_state->stream)
8356 			continue;
8357 
8358 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8359 			const struct amdgpu_framebuffer *amdgpu_fb =
8360 				to_amdgpu_framebuffer(new_plane_state->fb);
8361 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8362 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8363 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8364 			uint64_t tiling_flags;
8365 			bool tmz_surface = false;
8366 
8367 			new_plane_crtc = new_plane_state->crtc;
8368 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8369 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8370 
8371 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8372 				continue;
8373 
8374 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8375 				update_type = UPDATE_TYPE_FULL;
8376 				goto cleanup;
8377 			}
8378 
8379 			if (crtc != new_plane_crtc)
8380 				continue;
8381 
8382 			bundle->surface_updates[num_plane].surface =
8383 					new_dm_plane_state->dc_state;
8384 
8385 			if (new_crtc_state->mode_changed) {
8386 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8387 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8388 			}
8389 
8390 			if (new_crtc_state->color_mgmt_changed) {
8391 				bundle->surface_updates[num_plane].gamma =
8392 						new_dm_plane_state->dc_state->gamma_correction;
8393 				bundle->surface_updates[num_plane].in_transfer_func =
8394 						new_dm_plane_state->dc_state->in_transfer_func;
8395 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8396 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8397 				bundle->stream_update.gamut_remap =
8398 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8399 				bundle->stream_update.output_csc_transform =
8400 						&new_dm_crtc_state->stream->csc_color_matrix;
8401 				bundle->stream_update.out_transfer_func =
8402 						new_dm_crtc_state->stream->out_transfer_func;
8403 			}
8404 
8405 			ret = fill_dc_scaling_info(new_plane_state,
8406 						   scaling_info);
8407 			if (ret)
8408 				goto cleanup;
8409 
8410 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8411 
8412 			if (amdgpu_fb) {
8413 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8414 				if (ret)
8415 					goto cleanup;
8416 
8417 				ret = fill_dc_plane_info_and_addr(
8418 					dm->adev, new_plane_state, tiling_flags,
8419 					plane_info,
8420 					&flip_addr->address, tmz_surface,
8421 					false);
8422 				if (ret)
8423 					goto cleanup;
8424 
8425 				bundle->surface_updates[num_plane].plane_info = plane_info;
8426 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8427 			}
8428 
8429 			num_plane++;
8430 		}
8431 
8432 		if (num_plane == 0)
8433 			continue;
8434 
8435 		ret = dm_atomic_get_state(state, &dm_state);
8436 		if (ret)
8437 			goto cleanup;
8438 
8439 		old_dm_state = dm_atomic_get_old_state(state);
8440 		if (!old_dm_state) {
8441 			ret = -EINVAL;
8442 			goto cleanup;
8443 		}
8444 
8445 		status = dc_stream_get_status_from_state(old_dm_state->context,
8446 							 new_dm_crtc_state->stream);
8447 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8448 		/*
8449 		 * TODO: DC modifies the surface during this call so we need
8450 		 * to lock here - find a way to do this without locking.
8451 		 */
8452 		mutex_lock(&dm->dc_lock);
8453 		update_type = dc_check_update_surfaces_for_stream(
8454 				dc,	bundle->surface_updates, num_plane,
8455 				&bundle->stream_update, status);
8456 		mutex_unlock(&dm->dc_lock);
8457 
8458 		if (update_type > UPDATE_TYPE_MED) {
8459 			update_type = UPDATE_TYPE_FULL;
8460 			goto cleanup;
8461 		}
8462 	}
8463 
8464 cleanup:
8465 	kfree(bundle);
8466 
8467 	*out_type = update_type;
8468 	return ret;
8469 }
8470 #if defined(CONFIG_DRM_AMD_DC_DCN)
8471 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8472 {
8473 	struct drm_connector *connector;
8474 	struct drm_connector_state *conn_state;
8475 	struct amdgpu_dm_connector *aconnector = NULL;
8476 	int i;
8477 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8478 		if (conn_state->crtc != crtc)
8479 			continue;
8480 
8481 		aconnector = to_amdgpu_dm_connector(connector);
8482 		if (!aconnector->port || !aconnector->mst_port)
8483 			aconnector = NULL;
8484 		else
8485 			break;
8486 	}
8487 
8488 	if (!aconnector)
8489 		return 0;
8490 
8491 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8492 }
8493 #endif
8494 
8495 /**
8496  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8497  * @dev: The DRM device
8498  * @state: The atomic state to commit
8499  *
8500  * Validate that the given atomic state is programmable by DC into hardware.
8501  * This involves constructing a &struct dc_state reflecting the new hardware
8502  * state we wish to commit, then querying DC to see if it is programmable. It's
8503  * important not to modify the existing DC state. Otherwise, atomic_check
8504  * may unexpectedly commit hardware changes.
8505  *
8506  * When validating the DC state, it's important that the right locks are
8507  * acquired. For full updates case which removes/adds/updates streams on one
8508  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8509  * that any such full update commit will wait for completion of any outstanding
8510  * flip using DRMs synchronization events. See
8511  * dm_determine_update_type_for_commit()
8512  *
8513  * Note that DM adds the affected connectors for all CRTCs in state, when that
8514  * might not seem necessary. This is because DC stream creation requires the
8515  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8516  * be possible but non-trivial - a possible TODO item.
8517  *
8518  * Return: -Error code if validation failed.
8519  */
8520 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8521 				  struct drm_atomic_state *state)
8522 {
8523 	struct amdgpu_device *adev = dev->dev_private;
8524 	struct dm_atomic_state *dm_state = NULL;
8525 	struct dc *dc = adev->dm.dc;
8526 	struct drm_connector *connector;
8527 	struct drm_connector_state *old_con_state, *new_con_state;
8528 	struct drm_crtc *crtc;
8529 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8530 	struct drm_plane *plane;
8531 	struct drm_plane_state *old_plane_state, *new_plane_state;
8532 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8533 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8534 	enum dc_status status;
8535 	int ret, i;
8536 
8537 	/*
8538 	 * This bool will be set for true for any modeset/reset
8539 	 * or plane update which implies non fast surface update.
8540 	 */
8541 	bool lock_and_validation_needed = false;
8542 
8543 	ret = drm_atomic_helper_check_modeset(dev, state);
8544 	if (ret)
8545 		goto fail;
8546 
8547 #if defined(CONFIG_DRM_AMD_DC_DCN)
8548 	if (adev->asic_type >= CHIP_NAVI10) {
8549 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8550 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8551 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8552 				if (ret)
8553 					goto fail;
8554 			}
8555 		}
8556 	}
8557 #endif
8558 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8559 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8560 		    !new_crtc_state->color_mgmt_changed &&
8561 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8562 			continue;
8563 
8564 		if (!new_crtc_state->enable)
8565 			continue;
8566 
8567 		ret = drm_atomic_add_affected_connectors(state, crtc);
8568 		if (ret)
8569 			return ret;
8570 
8571 		ret = drm_atomic_add_affected_planes(state, crtc);
8572 		if (ret)
8573 			goto fail;
8574 	}
8575 
8576 	/*
8577 	 * Add all primary and overlay planes on the CRTC to the state
8578 	 * whenever a plane is enabled to maintain correct z-ordering
8579 	 * and to enable fast surface updates.
8580 	 */
8581 	drm_for_each_crtc(crtc, dev) {
8582 		bool modified = false;
8583 
8584 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8585 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8586 				continue;
8587 
8588 			if (new_plane_state->crtc == crtc ||
8589 			    old_plane_state->crtc == crtc) {
8590 				modified = true;
8591 				break;
8592 			}
8593 		}
8594 
8595 		if (!modified)
8596 			continue;
8597 
8598 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8599 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8600 				continue;
8601 
8602 			new_plane_state =
8603 				drm_atomic_get_plane_state(state, plane);
8604 
8605 			if (IS_ERR(new_plane_state)) {
8606 				ret = PTR_ERR(new_plane_state);
8607 				goto fail;
8608 			}
8609 		}
8610 	}
8611 
8612 	/* Remove exiting planes if they are modified */
8613 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8614 		ret = dm_update_plane_state(dc, state, plane,
8615 					    old_plane_state,
8616 					    new_plane_state,
8617 					    false,
8618 					    &lock_and_validation_needed);
8619 		if (ret)
8620 			goto fail;
8621 	}
8622 
8623 	/* Disable all crtcs which require disable */
8624 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8625 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8626 					   old_crtc_state,
8627 					   new_crtc_state,
8628 					   false,
8629 					   &lock_and_validation_needed);
8630 		if (ret)
8631 			goto fail;
8632 	}
8633 
8634 	/* Enable all crtcs which require enable */
8635 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8636 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8637 					   old_crtc_state,
8638 					   new_crtc_state,
8639 					   true,
8640 					   &lock_and_validation_needed);
8641 		if (ret)
8642 			goto fail;
8643 	}
8644 
8645 	/* Add new/modified planes */
8646 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8647 		ret = dm_update_plane_state(dc, state, plane,
8648 					    old_plane_state,
8649 					    new_plane_state,
8650 					    true,
8651 					    &lock_and_validation_needed);
8652 		if (ret)
8653 			goto fail;
8654 	}
8655 
8656 	/* Run this here since we want to validate the streams we created */
8657 	ret = drm_atomic_helper_check_planes(dev, state);
8658 	if (ret)
8659 		goto fail;
8660 
8661 	if (state->legacy_cursor_update) {
8662 		/*
8663 		 * This is a fast cursor update coming from the plane update
8664 		 * helper, check if it can be done asynchronously for better
8665 		 * performance.
8666 		 */
8667 		state->async_update =
8668 			!drm_atomic_helper_async_check(dev, state);
8669 
8670 		/*
8671 		 * Skip the remaining global validation if this is an async
8672 		 * update. Cursor updates can be done without affecting
8673 		 * state or bandwidth calcs and this avoids the performance
8674 		 * penalty of locking the private state object and
8675 		 * allocating a new dc_state.
8676 		 */
8677 		if (state->async_update)
8678 			return 0;
8679 	}
8680 
8681 	/* Check scaling and underscan changes*/
8682 	/* TODO Removed scaling changes validation due to inability to commit
8683 	 * new stream into context w\o causing full reset. Need to
8684 	 * decide how to handle.
8685 	 */
8686 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8687 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8688 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8689 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8690 
8691 		/* Skip any modesets/resets */
8692 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8693 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8694 			continue;
8695 
8696 		/* Skip any thing not scale or underscan changes */
8697 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8698 			continue;
8699 
8700 		overall_update_type = UPDATE_TYPE_FULL;
8701 		lock_and_validation_needed = true;
8702 	}
8703 
8704 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8705 	if (ret)
8706 		goto fail;
8707 
8708 	if (overall_update_type < update_type)
8709 		overall_update_type = update_type;
8710 
8711 	/*
8712 	 * lock_and_validation_needed was an old way to determine if we need to set
8713 	 * the global lock. Leaving it in to check if we broke any corner cases
8714 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8715 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8716 	 */
8717 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8718 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8719 
8720 	if (overall_update_type > UPDATE_TYPE_FAST) {
8721 		ret = dm_atomic_get_state(state, &dm_state);
8722 		if (ret)
8723 			goto fail;
8724 
8725 		ret = do_aquire_global_lock(dev, state);
8726 		if (ret)
8727 			goto fail;
8728 
8729 #if defined(CONFIG_DRM_AMD_DC_DCN)
8730 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8731 			goto fail;
8732 
8733 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8734 		if (ret)
8735 			goto fail;
8736 #endif
8737 
8738 		/*
8739 		 * Perform validation of MST topology in the state:
8740 		 * We need to perform MST atomic check before calling
8741 		 * dc_validate_global_state(), or there is a chance
8742 		 * to get stuck in an infinite loop and hang eventually.
8743 		 */
8744 		ret = drm_dp_mst_atomic_check(state);
8745 		if (ret)
8746 			goto fail;
8747 		status = dc_validate_global_state(dc, dm_state->context, false);
8748 		if (status != DC_OK) {
8749 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8750 				       dc_status_to_str(status), status);
8751 			ret = -EINVAL;
8752 			goto fail;
8753 		}
8754 	} else {
8755 		/*
8756 		 * The commit is a fast update. Fast updates shouldn't change
8757 		 * the DC context, affect global validation, and can have their
8758 		 * commit work done in parallel with other commits not touching
8759 		 * the same resource. If we have a new DC context as part of
8760 		 * the DM atomic state from validation we need to free it and
8761 		 * retain the existing one instead.
8762 		 *
8763 		 * Furthermore, since the DM atomic state only contains the DC
8764 		 * context and can safely be annulled, we can free the state
8765 		 * and clear the associated private object now to free
8766 		 * some memory and avoid a possible use-after-free later.
8767 		 */
8768 
8769 		for (i = 0; i < state->num_private_objs; i++) {
8770 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8771 
8772 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8773 				int j = state->num_private_objs-1;
8774 
8775 				dm_atomic_destroy_state(obj,
8776 						state->private_objs[i].state);
8777 
8778 				/* If i is not at the end of the array then the
8779 				 * last element needs to be moved to where i was
8780 				 * before the array can safely be truncated.
8781 				 */
8782 				if (i != j)
8783 					state->private_objs[i] =
8784 						state->private_objs[j];
8785 
8786 				state->private_objs[j].ptr = NULL;
8787 				state->private_objs[j].state = NULL;
8788 				state->private_objs[j].old_state = NULL;
8789 				state->private_objs[j].new_state = NULL;
8790 
8791 				state->num_private_objs = j;
8792 				break;
8793 			}
8794 		}
8795 	}
8796 
8797 	/* Store the overall update type for use later in atomic check. */
8798 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8799 		struct dm_crtc_state *dm_new_crtc_state =
8800 			to_dm_crtc_state(new_crtc_state);
8801 
8802 		dm_new_crtc_state->update_type = (int)overall_update_type;
8803 	}
8804 
8805 	/* Must be success */
8806 	WARN_ON(ret);
8807 	return ret;
8808 
8809 fail:
8810 	if (ret == -EDEADLK)
8811 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8812 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8813 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8814 	else
8815 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8816 
8817 	return ret;
8818 }
8819 
8820 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8821 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8822 {
8823 	uint8_t dpcd_data;
8824 	bool capable = false;
8825 
8826 	if (amdgpu_dm_connector->dc_link &&
8827 		dm_helpers_dp_read_dpcd(
8828 				NULL,
8829 				amdgpu_dm_connector->dc_link,
8830 				DP_DOWN_STREAM_PORT_COUNT,
8831 				&dpcd_data,
8832 				sizeof(dpcd_data))) {
8833 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8834 	}
8835 
8836 	return capable;
8837 }
8838 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8839 					struct edid *edid)
8840 {
8841 	int i;
8842 	bool edid_check_required;
8843 	struct detailed_timing *timing;
8844 	struct detailed_non_pixel *data;
8845 	struct detailed_data_monitor_range *range;
8846 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8847 			to_amdgpu_dm_connector(connector);
8848 	struct dm_connector_state *dm_con_state = NULL;
8849 
8850 	struct drm_device *dev = connector->dev;
8851 	struct amdgpu_device *adev = dev->dev_private;
8852 	bool freesync_capable = false;
8853 
8854 	if (!connector->state) {
8855 		DRM_ERROR("%s - Connector has no state", __func__);
8856 		goto update;
8857 	}
8858 
8859 	if (!edid) {
8860 		dm_con_state = to_dm_connector_state(connector->state);
8861 
8862 		amdgpu_dm_connector->min_vfreq = 0;
8863 		amdgpu_dm_connector->max_vfreq = 0;
8864 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8865 
8866 		goto update;
8867 	}
8868 
8869 	dm_con_state = to_dm_connector_state(connector->state);
8870 
8871 	edid_check_required = false;
8872 	if (!amdgpu_dm_connector->dc_sink) {
8873 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8874 		goto update;
8875 	}
8876 	if (!adev->dm.freesync_module)
8877 		goto update;
8878 	/*
8879 	 * if edid non zero restrict freesync only for dp and edp
8880 	 */
8881 	if (edid) {
8882 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8883 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8884 			edid_check_required = is_dp_capable_without_timing_msa(
8885 						adev->dm.dc,
8886 						amdgpu_dm_connector);
8887 		}
8888 	}
8889 	if (edid_check_required == true && (edid->version > 1 ||
8890 	   (edid->version == 1 && edid->revision > 1))) {
8891 		for (i = 0; i < 4; i++) {
8892 
8893 			timing	= &edid->detailed_timings[i];
8894 			data	= &timing->data.other_data;
8895 			range	= &data->data.range;
8896 			/*
8897 			 * Check if monitor has continuous frequency mode
8898 			 */
8899 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8900 				continue;
8901 			/*
8902 			 * Check for flag range limits only. If flag == 1 then
8903 			 * no additional timing information provided.
8904 			 * Default GTF, GTF Secondary curve and CVT are not
8905 			 * supported
8906 			 */
8907 			if (range->flags != 1)
8908 				continue;
8909 
8910 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8911 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8912 			amdgpu_dm_connector->pixel_clock_mhz =
8913 				range->pixel_clock_mhz * 10;
8914 			break;
8915 		}
8916 
8917 		if (amdgpu_dm_connector->max_vfreq -
8918 		    amdgpu_dm_connector->min_vfreq > 10) {
8919 
8920 			freesync_capable = true;
8921 		}
8922 	}
8923 
8924 update:
8925 	if (dm_con_state)
8926 		dm_con_state->freesync_capable = freesync_capable;
8927 
8928 	if (connector->vrr_capable_property)
8929 		drm_connector_set_vrr_capable_property(connector,
8930 						       freesync_capable);
8931 }
8932 
8933 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8934 {
8935 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8936 
8937 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8938 		return;
8939 	if (link->type == dc_connection_none)
8940 		return;
8941 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8942 					dpcd_data, sizeof(dpcd_data))) {
8943 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8944 
8945 		if (dpcd_data[0] == 0) {
8946 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8947 			link->psr_settings.psr_feature_enabled = false;
8948 		} else {
8949 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8950 			link->psr_settings.psr_feature_enabled = true;
8951 		}
8952 
8953 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8954 	}
8955 }
8956 
8957 /*
8958  * amdgpu_dm_link_setup_psr() - configure psr link
8959  * @stream: stream state
8960  *
8961  * Return: true if success
8962  */
8963 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8964 {
8965 	struct dc_link *link = NULL;
8966 	struct psr_config psr_config = {0};
8967 	struct psr_context psr_context = {0};
8968 	bool ret = false;
8969 
8970 	if (stream == NULL)
8971 		return false;
8972 
8973 	link = stream->link;
8974 
8975 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8976 
8977 	if (psr_config.psr_version > 0) {
8978 		psr_config.psr_exit_link_training_required = 0x1;
8979 		psr_config.psr_frame_capture_indication_req = 0;
8980 		psr_config.psr_rfb_setup_time = 0x37;
8981 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8982 		psr_config.allow_smu_optimizations = 0x0;
8983 
8984 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8985 
8986 	}
8987 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
8988 
8989 	return ret;
8990 }
8991 
8992 /*
8993  * amdgpu_dm_psr_enable() - enable psr f/w
8994  * @stream: stream state
8995  *
8996  * Return: true if success
8997  */
8998 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8999 {
9000 	struct dc_link *link = stream->link;
9001 	unsigned int vsync_rate_hz = 0;
9002 	struct dc_static_screen_params params = {0};
9003 	/* Calculate number of static frames before generating interrupt to
9004 	 * enter PSR.
9005 	 */
9006 	// Init fail safe of 2 frames static
9007 	unsigned int num_frames_static = 2;
9008 
9009 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9010 
9011 	vsync_rate_hz = div64_u64(div64_u64((
9012 			stream->timing.pix_clk_100hz * 100),
9013 			stream->timing.v_total),
9014 			stream->timing.h_total);
9015 
9016 	/* Round up
9017 	 * Calculate number of frames such that at least 30 ms of time has
9018 	 * passed.
9019 	 */
9020 	if (vsync_rate_hz != 0) {
9021 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9022 		num_frames_static = (30000 / frame_time_microsec) + 1;
9023 	}
9024 
9025 	params.triggers.cursor_update = true;
9026 	params.triggers.overlay_update = true;
9027 	params.triggers.surface_update = true;
9028 	params.num_frames = num_frames_static;
9029 
9030 	dc_stream_set_static_screen_params(link->ctx->dc,
9031 					   &stream, 1,
9032 					   &params);
9033 
9034 	return dc_link_set_psr_allow_active(link, true, false);
9035 }
9036 
9037 /*
9038  * amdgpu_dm_psr_disable() - disable psr f/w
9039  * @stream:  stream state
9040  *
9041  * Return: true if success
9042  */
9043 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9044 {
9045 
9046 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9047 
9048 	return dc_link_set_psr_allow_active(stream->link, false, true);
9049 }
9050