1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * initializes drm_device display related structures, based on the information
132  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
175 
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
184  * int disp_idx - [in] which CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191 	if (crtc >= adev->mode_info.num_crtc)
192 		return 0;
193 	else {
194 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
196 				acrtc->base.state);
197 
198 
199 		if (acrtc_state->stream == NULL) {
200 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
201 				  crtc);
202 			return 0;
203 		}
204 
205 		return dc_stream_get_vblank_counter(acrtc_state->stream);
206 	}
207 }
208 
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
221 		if (acrtc_state->stream ==  NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: ignored
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
325 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
326 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
339 	if (!e)
340 		WARN_ON(1);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
350 		/* Update to correct count and vblank timestamp if racing with
351 		 * vblank irq. This also updates to the correct vblank timestamp
352 		 * even in VRR mode, as scanout is past the front-porch atm.
353 		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
387 	/* Keep track of vblank of this flip for flip throttling. We use the
388 	 * cooked hw counter, as that one incremented at start of this vblank
389 	 * of pageflip completion, so last_flip_vblank is the forbidden count
390 	 * for queueing new pageflips if vsync + VRR is enabled.
391 	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
420 		/* Core vblank handling is done here after end of front-porch in
421 		 * vrr mode, as vblank timestamping will give valid results
422 		 * while now done after front-porch. This will also deliver
423 		 * page-flip completion events that have been queued to us
424 		 * if a pageflip happened inside front-porch.
425 		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
452  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/**
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/**
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
505 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
541 static int dm_early_init(void* handle);
542 
543 /* Allocate memory for FBC compressed data  */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
562 
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
573 		if (r)
574 			DRM_ERROR("DM: Failed to initialize FBC\n");
575 		else {
576 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
578 		}
579 
580 	}
581 
582 }
583 
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
705 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
773 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 	 * amdgpu_ucode_init_single_fw will load dmub firmware
775 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 	 * will be done by dm_dmub_hw_init
777 	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
792 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
865 	if(amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
932 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953 	if (!adev->dm.freesync_module) {
954 		DRM_ERROR(
955 		"amdgpu: failed to initialize freesync_module.\n");
956 	} else
957 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958 				adev->dm.freesync_module);
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR(
976 		"amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual used number of crtc */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
992 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
993 		DRM_ERROR(
994 		"amdgpu: failed to initialize sw for display support.\n");
995 		goto error;
996 	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013 	}
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028 	if (adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
1042 	 * TODO: pageflip, vlank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1058 
1059 	return;
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
1068 	switch(adev->asic_type) {
1069 	case CHIP_BONAIRE:
1070 	case CHIP_HAWAII:
1071 	case CHIP_KAVERI:
1072 	case CHIP_KABINI:
1073 	case CHIP_MULLINS:
1074 	case CHIP_TONGA:
1075 	case CHIP_FIJI:
1076 	case CHIP_CARRIZO:
1077 	case CHIP_STONEY:
1078 	case CHIP_POLARIS11:
1079 	case CHIP_POLARIS10:
1080 	case CHIP_POLARIS12:
1081 	case CHIP_VEGAM:
1082 	case CHIP_VEGA10:
1083 	case CHIP_VEGA12:
1084 	case CHIP_VEGA20:
1085 	case CHIP_NAVI10:
1086 	case CHIP_NAVI14:
1087 	case CHIP_RENOIR:
1088 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1089 	case CHIP_SIENNA_CICHLID:
1090 	case CHIP_NAVY_FLOUNDER:
1091 #endif
1092 		return 0;
1093 	case CHIP_NAVI12:
1094 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1095 		break;
1096 	case CHIP_RAVEN:
1097 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1098 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1099 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1100 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1101 		else
1102 			return 0;
1103 		break;
1104 	default:
1105 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1106 		return -EINVAL;
1107 	}
1108 
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1111 		return 0;
1112 	}
1113 
1114 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1115 	if (r == -ENOENT) {
1116 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1117 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1118 		adev->dm.fw_dmcu = NULL;
1119 		return 0;
1120 	}
1121 	if (r) {
1122 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1123 			fw_name_dmcu);
1124 		return r;
1125 	}
1126 
1127 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1128 	if (r) {
1129 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1130 			fw_name_dmcu);
1131 		release_firmware(adev->dm.fw_dmcu);
1132 		adev->dm.fw_dmcu = NULL;
1133 		return r;
1134 	}
1135 
1136 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1137 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1138 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1139 	adev->firmware.fw_size +=
1140 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1141 
1142 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1144 	adev->firmware.fw_size +=
1145 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1146 
1147 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1148 
1149 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1150 
1151 	return 0;
1152 }
1153 
1154 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1155 {
1156 	struct amdgpu_device *adev = ctx;
1157 
1158 	return dm_read_reg(adev->dm.dc->ctx, address);
1159 }
1160 
1161 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1162 				     uint32_t value)
1163 {
1164 	struct amdgpu_device *adev = ctx;
1165 
1166 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1167 }
1168 
1169 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1170 {
1171 	struct dmub_srv_create_params create_params;
1172 	struct dmub_srv_region_params region_params;
1173 	struct dmub_srv_region_info region_info;
1174 	struct dmub_srv_fb_params fb_params;
1175 	struct dmub_srv_fb_info *fb_info;
1176 	struct dmub_srv *dmub_srv;
1177 	const struct dmcub_firmware_header_v1_0 *hdr;
1178 	const char *fw_name_dmub;
1179 	enum dmub_asic dmub_asic;
1180 	enum dmub_status status;
1181 	int r;
1182 
1183 	switch (adev->asic_type) {
1184 	case CHIP_RENOIR:
1185 		dmub_asic = DMUB_ASIC_DCN21;
1186 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1187 		break;
1188 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1189 	case CHIP_SIENNA_CICHLID:
1190 		dmub_asic = DMUB_ASIC_DCN30;
1191 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1192 		break;
1193 	case CHIP_NAVY_FLOUNDER:
1194 		dmub_asic = DMUB_ASIC_DCN30;
1195 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1196 		break;
1197 #endif
1198 
1199 	default:
1200 		/* ASIC doesn't support DMUB. */
1201 		return 0;
1202 	}
1203 
1204 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1205 	if (r) {
1206 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1207 		return 0;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1211 	if (r) {
1212 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1213 		return 0;
1214 	}
1215 
1216 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1217 
1218 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1219 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1220 			AMDGPU_UCODE_ID_DMCUB;
1221 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1222 			adev->dm.dmub_fw;
1223 		adev->firmware.fw_size +=
1224 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1225 
1226 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1227 			 adev->dm.dmcub_fw_version);
1228 	}
1229 
1230 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1231 
1232 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1233 	dmub_srv = adev->dm.dmub_srv;
1234 
1235 	if (!dmub_srv) {
1236 		DRM_ERROR("Failed to allocate DMUB service!\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(&create_params, 0, sizeof(create_params));
1241 	create_params.user_ctx = adev;
1242 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1243 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1244 	create_params.asic = dmub_asic;
1245 
1246 	/* Create the DMUB service. */
1247 	status = dmub_srv_create(dmub_srv, &create_params);
1248 	if (status != DMUB_STATUS_OK) {
1249 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Calculate the size of all the regions for the DMUB service. */
1254 	memset(&region_params, 0, sizeof(region_params));
1255 
1256 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1257 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1258 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1259 	region_params.vbios_size = adev->bios_size;
1260 	region_params.fw_bss_data = region_params.bss_data_size ?
1261 		adev->dm.dmub_fw->data +
1262 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1263 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1264 	region_params.fw_inst_const =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		PSP_HEADER_BYTES;
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	release_firmware(adev->dm.dmub_fw);
1338 	adev->dm.dmub_fw = NULL;
1339 
1340 	release_firmware(adev->dm.fw_dmcu);
1341 	adev->dm.fw_dmcu = NULL;
1342 
1343 	return 0;
1344 }
1345 
1346 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1347 {
1348 	struct amdgpu_dm_connector *aconnector;
1349 	struct drm_connector *connector;
1350 	struct drm_connector_list_iter iter;
1351 	int ret = 0;
1352 
1353 	drm_connector_list_iter_begin(dev, &iter);
1354 	drm_for_each_connector_iter(connector, &iter) {
1355 		aconnector = to_amdgpu_dm_connector(connector);
1356 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1357 		    aconnector->mst_mgr.aux) {
1358 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1359 					 aconnector,
1360 					 aconnector->base.base.id);
1361 
1362 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1363 			if (ret < 0) {
1364 				DRM_ERROR("DM_MST: Failed to start MST\n");
1365 				aconnector->dc_link->type =
1366 					dc_connection_single;
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	drm_connector_list_iter_end(&iter);
1372 
1373 	return ret;
1374 }
1375 
1376 static int dm_late_init(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	struct dmcu_iram_parameters params;
1381 	unsigned int linear_lut[16];
1382 	int i;
1383 	struct dmcu *dmcu = NULL;
1384 	bool ret = true;
1385 
1386 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1387 		return detect_mst_link_for_all_connectors(adev->ddev);
1388 
1389 	dmcu = adev->dm.dc->res_pool->dmcu;
1390 
1391 	for (i = 0; i < 16; i++)
1392 		linear_lut[i] = 0xFFFF * i / 15;
1393 
1394 	params.set = 0;
1395 	params.backlight_ramping_start = 0xCCCC;
1396 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1397 	params.backlight_lut_array_size = 16;
1398 	params.backlight_lut_array = linear_lut;
1399 
1400 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1401 	 * 0xFFFF x 0.01 = 0x28F
1402 	 */
1403 	params.min_abm_backlight = 0x28F;
1404 
1405 	/* In the case where abm is implemented on dmcub,
1406 	 * dmcu object will be null.
1407 	 * ABM 2.4 and up are implemented on dmcub.
1408 	 */
1409 	if (dmcu)
1410 		ret = dmcu_load_iram(dmcu, params);
1411 	else if (adev->dm.dc->ctx->dmub_srv)
1412 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1413 
1414 	if (!ret)
1415 		return -EINVAL;
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
1462 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1463 	 * on window driver dc implementation.
1464 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1465 	 * should be passed to smu during boot up and resume from s3.
1466 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1467 	 * dcn20_resource_construct
1468 	 * then call pplib functions below to pass the settings to smu:
1469 	 * smu_set_watermarks_for_clock_ranges
1470 	 * smu_set_watermarks_table
1471 	 * navi10_set_watermarks_table
1472 	 * smu_write_watermarks_table
1473 	 *
1474 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 	 * dc has implemented different flow for window driver:
1476 	 * dc_hardware_init / dc_set_power_state
1477 	 * dcn10_init_hw
1478 	 * notify_wm_ranges
1479 	 * set_wm_ranges
1480 	 * -- Linux
1481 	 * smu_set_watermarks_for_clock_ranges
1482 	 * renoir_set_watermarks_table
1483 	 * smu_write_watermarks_table
1484 	 *
1485 	 * For Linux,
1486 	 * dc_hardware_init -> amdgpu_dm_init
1487 	 * dc_set_power_state --> dm_resume
1488 	 *
1489 	 * therefore, this function apply to navi10/12/14 but not Renoir
1490 	 * *
1491 	 */
1492 	switch(adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	ret = smu_write_watermarks_table(smu);
1502 	if (ret) {
1503 		DRM_ERROR("Failed to update WMTABLE!\n");
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 /**
1511  * dm_hw_init() - Initialize DC device
1512  * @handle: The base driver device containing the amdgpu_dm device.
1513  *
1514  * Initialize the &struct amdgpu_display_manager device. This involves calling
1515  * the initializers of each DM component, then populating the struct with them.
1516  *
1517  * Although the function implies hardware initialization, both hardware and
1518  * software are initialized here. Splitting them out to their relevant init
1519  * hooks is a future TODO item.
1520  *
1521  * Some notable things that are initialized here:
1522  *
1523  * - Display Core, both software and hardware
1524  * - DC modules that we need (freesync and color management)
1525  * - DRM software states
1526  * - Interrupt sources and handlers
1527  * - Vblank support
1528  * - Debug FS entries, if enabled
1529  */
1530 static int dm_hw_init(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 	/* Create DAL display manager */
1534 	amdgpu_dm_init(adev);
1535 	amdgpu_dm_hpd_init(adev);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * dm_hw_fini() - Teardown DC device
1542  * @handle: The base driver device containing the amdgpu_dm device.
1543  *
1544  * Teardown components within &struct amdgpu_display_manager that require
1545  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1546  * were loaded. Also flush IRQ workqueues and disable them.
1547  */
1548 static int dm_hw_fini(void *handle)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	amdgpu_dm_hpd_fini(adev);
1553 
1554 	amdgpu_dm_irq_fini(adev);
1555 	amdgpu_dm_fini(adev);
1556 	return 0;
1557 }
1558 
1559 
1560 static int dm_enable_vblank(struct drm_crtc *crtc);
1561 static void dm_disable_vblank(struct drm_crtc *crtc);
1562 
1563 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1564 				 struct dc_state *state, bool enable)
1565 {
1566 	enum dc_irq_source irq_source;
1567 	struct amdgpu_crtc *acrtc;
1568 	int rc = -EBUSY;
1569 	int i = 0;
1570 
1571 	for (i = 0; i < state->stream_count; i++) {
1572 		acrtc = get_crtc_by_otg_inst(
1573 				adev, state->stream_status[i].primary_otg_inst);
1574 
1575 		if (acrtc && state->stream_status[i].plane_count != 0) {
1576 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1577 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1578 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1579 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1580 			if (rc)
1581 				DRM_WARN("Failed to %s pflip interrupts\n",
1582 					 enable ? "enable" : "disable");
1583 
1584 			if (enable) {
1585 				rc = dm_enable_vblank(&acrtc->base);
1586 				if (rc)
1587 					DRM_WARN("Failed to enable vblank interrupts\n");
1588 			} else {
1589 				dm_disable_vblank(&acrtc->base);
1590 			}
1591 
1592 		}
1593 	}
1594 
1595 }
1596 
1597 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1598 {
1599 	struct dc_state *context = NULL;
1600 	enum dc_status res = DC_ERROR_UNEXPECTED;
1601 	int i;
1602 	struct dc_stream_state *del_streams[MAX_PIPES];
1603 	int del_streams_count = 0;
1604 
1605 	memset(del_streams, 0, sizeof(del_streams));
1606 
1607 	context = dc_create_state(dc);
1608 	if (context == NULL)
1609 		goto context_alloc_fail;
1610 
1611 	dc_resource_state_copy_construct_current(dc, context);
1612 
1613 	/* First remove from context all streams */
1614 	for (i = 0; i < context->stream_count; i++) {
1615 		struct dc_stream_state *stream = context->streams[i];
1616 
1617 		del_streams[del_streams_count++] = stream;
1618 	}
1619 
1620 	/* Remove all planes for removed streams and then remove the streams */
1621 	for (i = 0; i < del_streams_count; i++) {
1622 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1623 			res = DC_FAIL_DETACH_SURFACES;
1624 			goto fail;
1625 		}
1626 
1627 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1628 		if (res != DC_OK)
1629 			goto fail;
1630 	}
1631 
1632 
1633 	res = dc_validate_global_state(dc, context, false);
1634 
1635 	if (res != DC_OK) {
1636 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1637 		goto fail;
1638 	}
1639 
1640 	res = dc_commit_state(dc, context);
1641 
1642 fail:
1643 	dc_release_state(context);
1644 
1645 context_alloc_fail:
1646 	return res;
1647 }
1648 
1649 static int dm_suspend(void *handle)
1650 {
1651 	struct amdgpu_device *adev = handle;
1652 	struct amdgpu_display_manager *dm = &adev->dm;
1653 	int ret = 0;
1654 
1655 	if (adev->in_gpu_reset) {
1656 		mutex_lock(&dm->dc_lock);
1657 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1658 
1659 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1660 
1661 		amdgpu_dm_commit_zero_streams(dm->dc);
1662 
1663 		amdgpu_dm_irq_suspend(adev);
1664 
1665 		return ret;
1666 	}
1667 
1668 	WARN_ON(adev->dm.cached_state);
1669 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1670 
1671 	s3_handle_mst(adev->ddev, true);
1672 
1673 	amdgpu_dm_irq_suspend(adev);
1674 
1675 
1676 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct amdgpu_dm_connector *
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1683 					     struct drm_crtc *crtc)
1684 {
1685 	uint32_t i;
1686 	struct drm_connector_state *new_con_state;
1687 	struct drm_connector *connector;
1688 	struct drm_crtc *crtc_from_state;
1689 
1690 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1691 		crtc_from_state = new_con_state->crtc;
1692 
1693 		if (crtc_from_state == crtc)
1694 			return to_amdgpu_dm_connector(connector);
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
1700 static void emulated_link_detect(struct dc_link *link)
1701 {
1702 	struct dc_sink_init_data sink_init_data = { 0 };
1703 	struct display_sink_capability sink_caps = { 0 };
1704 	enum dc_edid_status edid_status;
1705 	struct dc_context *dc_ctx = link->ctx;
1706 	struct dc_sink *sink = NULL;
1707 	struct dc_sink *prev_sink = NULL;
1708 
1709 	link->type = dc_connection_none;
1710 	prev_sink = link->local_sink;
1711 
1712 	if (prev_sink != NULL)
1713 		dc_sink_retain(prev_sink);
1714 
1715 	switch (link->connector_signal) {
1716 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_LVDS: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_EDP: {
1741 		sink_caps.transaction_type =
1742 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1743 		sink_caps.signal = SIGNAL_TYPE_EDP;
1744 		break;
1745 	}
1746 
1747 	case SIGNAL_TYPE_DISPLAY_PORT: {
1748 		sink_caps.transaction_type =
1749 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1750 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1751 		break;
1752 	}
1753 
1754 	default:
1755 		DC_ERROR("Invalid connector type! signal:%d\n",
1756 			link->connector_signal);
1757 		return;
1758 	}
1759 
1760 	sink_init_data.link = link;
1761 	sink_init_data.sink_signal = sink_caps.signal;
1762 
1763 	sink = dc_sink_create(&sink_init_data);
1764 	if (!sink) {
1765 		DC_ERROR("Failed to create sink!\n");
1766 		return;
1767 	}
1768 
1769 	/* dc_sink_create returns a new reference */
1770 	link->local_sink = sink;
1771 
1772 	edid_status = dm_helpers_read_local_edid(
1773 			link->ctx,
1774 			link,
1775 			sink);
1776 
1777 	if (edid_status != EDID_OK)
1778 		DC_ERROR("Failed to read EDID");
1779 
1780 }
1781 
1782 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1783 				     struct amdgpu_display_manager *dm)
1784 {
1785 	struct {
1786 		struct dc_surface_update surface_updates[MAX_SURFACES];
1787 		struct dc_plane_info plane_infos[MAX_SURFACES];
1788 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1789 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1790 		struct dc_stream_update stream_update;
1791 	} * bundle;
1792 	int k, m;
1793 
1794 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1795 
1796 	if (!bundle) {
1797 		dm_error("Failed to allocate update bundle\n");
1798 		goto cleanup;
1799 	}
1800 
1801 	for (k = 0; k < dc_state->stream_count; k++) {
1802 		bundle->stream_update.stream = dc_state->streams[k];
1803 
1804 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1805 			bundle->surface_updates[m].surface =
1806 				dc_state->stream_status->plane_states[m];
1807 			bundle->surface_updates[m].surface->force_full_update =
1808 				true;
1809 		}
1810 		dc_commit_updates_for_stream(
1811 			dm->dc, bundle->surface_updates,
1812 			dc_state->stream_status->plane_count,
1813 			dc_state->streams[k], &bundle->stream_update, dc_state);
1814 	}
1815 
1816 cleanup:
1817 	kfree(bundle);
1818 
1819 	return;
1820 }
1821 
1822 static int dm_resume(void *handle)
1823 {
1824 	struct amdgpu_device *adev = handle;
1825 	struct drm_device *ddev = adev->ddev;
1826 	struct amdgpu_display_manager *dm = &adev->dm;
1827 	struct amdgpu_dm_connector *aconnector;
1828 	struct drm_connector *connector;
1829 	struct drm_connector_list_iter iter;
1830 	struct drm_crtc *crtc;
1831 	struct drm_crtc_state *new_crtc_state;
1832 	struct dm_crtc_state *dm_new_crtc_state;
1833 	struct drm_plane *plane;
1834 	struct drm_plane_state *new_plane_state;
1835 	struct dm_plane_state *dm_new_plane_state;
1836 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1837 	enum dc_connection_type new_connection_type = dc_connection_none;
1838 	struct dc_state *dc_state;
1839 	int i, r, j;
1840 
1841 	if (adev->in_gpu_reset) {
1842 		dc_state = dm->cached_dc_state;
1843 
1844 		r = dm_dmub_hw_init(adev);
1845 		if (r)
1846 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1847 
1848 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1849 		dc_resume(dm->dc);
1850 
1851 		amdgpu_dm_irq_resume_early(adev);
1852 
1853 		for (i = 0; i < dc_state->stream_count; i++) {
1854 			dc_state->streams[i]->mode_changed = true;
1855 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1856 				dc_state->stream_status->plane_states[j]->update_flags.raw
1857 					= 0xffffffff;
1858 			}
1859 		}
1860 
1861 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1862 
1863 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1864 
1865 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1866 
1867 		dc_release_state(dm->cached_dc_state);
1868 		dm->cached_dc_state = NULL;
1869 
1870 		amdgpu_dm_irq_resume_late(adev);
1871 
1872 		mutex_unlock(&dm->dc_lock);
1873 
1874 		return 0;
1875 	}
1876 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 	dc_release_state(dm_state->context);
1878 	dm_state->context = dc_create_state(dm->dc);
1879 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 	dc_resource_state_construct(dm->dc, dm_state->context);
1881 
1882 	/* Before powering on DC we need to re-initialize DMUB. */
1883 	r = dm_dmub_hw_init(adev);
1884 	if (r)
1885 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1886 
1887 	/* power on hardware */
1888 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1889 
1890 	/* program HPD filter */
1891 	dc_resume(dm->dc);
1892 
1893 	/*
1894 	 * early enable HPD Rx IRQ, should be done before set mode as short
1895 	 * pulse interrupts are used for MST
1896 	 */
1897 	amdgpu_dm_irq_resume_early(adev);
1898 
1899 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1900 	s3_handle_mst(ddev, false);
1901 
1902 	/* Do detection*/
1903 	drm_connector_list_iter_begin(ddev, &iter);
1904 	drm_for_each_connector_iter(connector, &iter) {
1905 		aconnector = to_amdgpu_dm_connector(connector);
1906 
1907 		/*
1908 		 * this is the case when traversing through already created
1909 		 * MST connectors, should be skipped
1910 		 */
1911 		if (aconnector->mst_port)
1912 			continue;
1913 
1914 		mutex_lock(&aconnector->hpd_lock);
1915 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1916 			DRM_ERROR("KMS: Failed to detect connector\n");
1917 
1918 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1919 			emulated_link_detect(aconnector->dc_link);
1920 		else
1921 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1922 
1923 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1924 			aconnector->fake_enable = false;
1925 
1926 		if (aconnector->dc_sink)
1927 			dc_sink_release(aconnector->dc_sink);
1928 		aconnector->dc_sink = NULL;
1929 		amdgpu_dm_update_connector_after_detect(aconnector);
1930 		mutex_unlock(&aconnector->hpd_lock);
1931 	}
1932 	drm_connector_list_iter_end(&iter);
1933 
1934 	/* Force mode set in atomic commit */
1935 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1936 		new_crtc_state->active_changed = true;
1937 
1938 	/*
1939 	 * atomic_check is expected to create the dc states. We need to release
1940 	 * them here, since they were duplicated as part of the suspend
1941 	 * procedure.
1942 	 */
1943 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1945 		if (dm_new_crtc_state->stream) {
1946 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1947 			dc_stream_release(dm_new_crtc_state->stream);
1948 			dm_new_crtc_state->stream = NULL;
1949 		}
1950 	}
1951 
1952 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1953 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1954 		if (dm_new_plane_state->dc_state) {
1955 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1956 			dc_plane_state_release(dm_new_plane_state->dc_state);
1957 			dm_new_plane_state->dc_state = NULL;
1958 		}
1959 	}
1960 
1961 	drm_atomic_helper_resume(ddev, dm->cached_state);
1962 
1963 	dm->cached_state = NULL;
1964 
1965 	amdgpu_dm_irq_resume_late(adev);
1966 
1967 	amdgpu_dm_smu_write_watermarks_table(adev);
1968 
1969 	return 0;
1970 }
1971 
1972 /**
1973  * DOC: DM Lifecycle
1974  *
1975  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1976  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1977  * the base driver's device list to be initialized and torn down accordingly.
1978  *
1979  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1980  */
1981 
1982 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1983 	.name = "dm",
1984 	.early_init = dm_early_init,
1985 	.late_init = dm_late_init,
1986 	.sw_init = dm_sw_init,
1987 	.sw_fini = dm_sw_fini,
1988 	.hw_init = dm_hw_init,
1989 	.hw_fini = dm_hw_fini,
1990 	.suspend = dm_suspend,
1991 	.resume = dm_resume,
1992 	.is_idle = dm_is_idle,
1993 	.wait_for_idle = dm_wait_for_idle,
1994 	.check_soft_reset = dm_check_soft_reset,
1995 	.soft_reset = dm_soft_reset,
1996 	.set_clockgating_state = dm_set_clockgating_state,
1997 	.set_powergating_state = dm_set_powergating_state,
1998 };
1999 
2000 const struct amdgpu_ip_block_version dm_ip_block =
2001 {
2002 	.type = AMD_IP_BLOCK_TYPE_DCE,
2003 	.major = 1,
2004 	.minor = 0,
2005 	.rev = 0,
2006 	.funcs = &amdgpu_dm_funcs,
2007 };
2008 
2009 
2010 /**
2011  * DOC: atomic
2012  *
2013  * *WIP*
2014  */
2015 
2016 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2017 	.fb_create = amdgpu_display_user_framebuffer_create,
2018 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2019 	.atomic_check = amdgpu_dm_atomic_check,
2020 	.atomic_commit = amdgpu_dm_atomic_commit,
2021 };
2022 
2023 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2024 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2025 };
2026 
2027 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2028 {
2029 	u32 max_cll, min_cll, max, min, q, r;
2030 	struct amdgpu_dm_backlight_caps *caps;
2031 	struct amdgpu_display_manager *dm;
2032 	struct drm_connector *conn_base;
2033 	struct amdgpu_device *adev;
2034 	struct dc_link *link = NULL;
2035 	static const u8 pre_computed_values[] = {
2036 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2037 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2038 
2039 	if (!aconnector || !aconnector->dc_link)
2040 		return;
2041 
2042 	link = aconnector->dc_link;
2043 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2044 		return;
2045 
2046 	conn_base = &aconnector->base;
2047 	adev = conn_base->dev->dev_private;
2048 	dm = &adev->dm;
2049 	caps = &dm->backlight_caps;
2050 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2051 	caps->aux_support = false;
2052 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2053 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2054 
2055 	if (caps->ext_caps->bits.oled == 1 ||
2056 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2057 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2058 		caps->aux_support = true;
2059 
2060 	/* From the specification (CTA-861-G), for calculating the maximum
2061 	 * luminance we need to use:
2062 	 *	Luminance = 50*2**(CV/32)
2063 	 * Where CV is a one-byte value.
2064 	 * For calculating this expression we may need float point precision;
2065 	 * to avoid this complexity level, we take advantage that CV is divided
2066 	 * by a constant. From the Euclids division algorithm, we know that CV
2067 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2068 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2069 	 * need to pre-compute the value of r/32. For pre-computing the values
2070 	 * We just used the following Ruby line:
2071 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2072 	 * The results of the above expressions can be verified at
2073 	 * pre_computed_values.
2074 	 */
2075 	q = max_cll >> 5;
2076 	r = max_cll % 32;
2077 	max = (1 << q) * pre_computed_values[r];
2078 
2079 	// min luminance: maxLum * (CV/255)^2 / 100
2080 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2081 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2082 
2083 	caps->aux_max_input_signal = max;
2084 	caps->aux_min_input_signal = min;
2085 }
2086 
2087 void amdgpu_dm_update_connector_after_detect(
2088 		struct amdgpu_dm_connector *aconnector)
2089 {
2090 	struct drm_connector *connector = &aconnector->base;
2091 	struct drm_device *dev = connector->dev;
2092 	struct dc_sink *sink;
2093 
2094 	/* MST handled by drm_mst framework */
2095 	if (aconnector->mst_mgr.mst_state == true)
2096 		return;
2097 
2098 
2099 	sink = aconnector->dc_link->local_sink;
2100 	if (sink)
2101 		dc_sink_retain(sink);
2102 
2103 	/*
2104 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 	 * the connector sink is set to either fake or physical sink depends on link status.
2106 	 * Skip if already done during boot.
2107 	 */
2108 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 			&& aconnector->dc_em_sink) {
2110 
2111 		/*
2112 		 * For S3 resume with headless use eml_sink to fake stream
2113 		 * because on resume connector->sink is set to NULL
2114 		 */
2115 		mutex_lock(&dev->mode_config.mutex);
2116 
2117 		if (sink) {
2118 			if (aconnector->dc_sink) {
2119 				amdgpu_dm_update_freesync_caps(connector, NULL);
2120 				/*
2121 				 * retain and release below are used to
2122 				 * bump up refcount for sink because the link doesn't point
2123 				 * to it anymore after disconnect, so on next crtc to connector
2124 				 * reshuffle by UMD we will get into unwanted dc_sink release
2125 				 */
2126 				dc_sink_release(aconnector->dc_sink);
2127 			}
2128 			aconnector->dc_sink = sink;
2129 			dc_sink_retain(aconnector->dc_sink);
2130 			amdgpu_dm_update_freesync_caps(connector,
2131 					aconnector->edid);
2132 		} else {
2133 			amdgpu_dm_update_freesync_caps(connector, NULL);
2134 			if (!aconnector->dc_sink) {
2135 				aconnector->dc_sink = aconnector->dc_em_sink;
2136 				dc_sink_retain(aconnector->dc_sink);
2137 			}
2138 		}
2139 
2140 		mutex_unlock(&dev->mode_config.mutex);
2141 
2142 		if (sink)
2143 			dc_sink_release(sink);
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * TODO: temporary guard to look for proper fix
2149 	 * if this sink is MST sink, we should not do anything
2150 	 */
2151 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 		dc_sink_release(sink);
2153 		return;
2154 	}
2155 
2156 	if (aconnector->dc_sink == sink) {
2157 		/*
2158 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2159 		 * Do nothing!!
2160 		 */
2161 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 				aconnector->connector_id);
2163 		if (sink)
2164 			dc_sink_release(sink);
2165 		return;
2166 	}
2167 
2168 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 		aconnector->connector_id, aconnector->dc_sink, sink);
2170 
2171 	mutex_lock(&dev->mode_config.mutex);
2172 
2173 	/*
2174 	 * 1. Update status of the drm connector
2175 	 * 2. Send an event and let userspace tell us what to do
2176 	 */
2177 	if (sink) {
2178 		/*
2179 		 * TODO: check if we still need the S3 mode update workaround.
2180 		 * If yes, put it here.
2181 		 */
2182 		if (aconnector->dc_sink)
2183 			amdgpu_dm_update_freesync_caps(connector, NULL);
2184 
2185 		aconnector->dc_sink = sink;
2186 		dc_sink_retain(aconnector->dc_sink);
2187 		if (sink->dc_edid.length == 0) {
2188 			aconnector->edid = NULL;
2189 			if (aconnector->dc_link->aux_mode) {
2190 				drm_dp_cec_unset_edid(
2191 					&aconnector->dm_dp_aux.aux);
2192 			}
2193 		} else {
2194 			aconnector->edid =
2195 				(struct edid *)sink->dc_edid.raw_edid;
2196 
2197 			drm_connector_update_edid_property(connector,
2198 							   aconnector->edid);
2199 			drm_add_edid_modes(connector, aconnector->edid);
2200 
2201 			if (aconnector->dc_link->aux_mode)
2202 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2203 						    aconnector->edid);
2204 		}
2205 
2206 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2207 		update_connector_ext_caps(aconnector);
2208 	} else {
2209 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2210 		amdgpu_dm_update_freesync_caps(connector, NULL);
2211 		drm_connector_update_edid_property(connector, NULL);
2212 		aconnector->num_modes = 0;
2213 		dc_sink_release(aconnector->dc_sink);
2214 		aconnector->dc_sink = NULL;
2215 		aconnector->edid = NULL;
2216 #ifdef CONFIG_DRM_AMD_DC_HDCP
2217 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2218 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2219 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2220 #endif
2221 	}
2222 
2223 	mutex_unlock(&dev->mode_config.mutex);
2224 
2225 	if (sink)
2226 		dc_sink_release(sink);
2227 }
2228 
2229 static void handle_hpd_irq(void *param)
2230 {
2231 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2232 	struct drm_connector *connector = &aconnector->base;
2233 	struct drm_device *dev = connector->dev;
2234 	enum dc_connection_type new_connection_type = dc_connection_none;
2235 #ifdef CONFIG_DRM_AMD_DC_HDCP
2236 	struct amdgpu_device *adev = dev->dev_private;
2237 #endif
2238 
2239 	/*
2240 	 * In case of failure or MST no need to update connector status or notify the OS
2241 	 * since (for MST case) MST does this in its own context.
2242 	 */
2243 	mutex_lock(&aconnector->hpd_lock);
2244 
2245 #ifdef CONFIG_DRM_AMD_DC_HDCP
2246 	if (adev->dm.hdcp_workqueue)
2247 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2248 #endif
2249 	if (aconnector->fake_enable)
2250 		aconnector->fake_enable = false;
2251 
2252 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2253 		DRM_ERROR("KMS: Failed to detect connector\n");
2254 
2255 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2256 		emulated_link_detect(aconnector->dc_link);
2257 
2258 
2259 		drm_modeset_lock_all(dev);
2260 		dm_restore_drm_connector_state(dev, connector);
2261 		drm_modeset_unlock_all(dev);
2262 
2263 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2264 			drm_kms_helper_hotplug_event(dev);
2265 
2266 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2267 		amdgpu_dm_update_connector_after_detect(aconnector);
2268 
2269 
2270 		drm_modeset_lock_all(dev);
2271 		dm_restore_drm_connector_state(dev, connector);
2272 		drm_modeset_unlock_all(dev);
2273 
2274 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2275 			drm_kms_helper_hotplug_event(dev);
2276 	}
2277 	mutex_unlock(&aconnector->hpd_lock);
2278 
2279 }
2280 
2281 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2282 {
2283 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2284 	uint8_t dret;
2285 	bool new_irq_handled = false;
2286 	int dpcd_addr;
2287 	int dpcd_bytes_to_read;
2288 
2289 	const int max_process_count = 30;
2290 	int process_count = 0;
2291 
2292 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2293 
2294 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2295 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2296 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2297 		dpcd_addr = DP_SINK_COUNT;
2298 	} else {
2299 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2300 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2301 		dpcd_addr = DP_SINK_COUNT_ESI;
2302 	}
2303 
2304 	dret = drm_dp_dpcd_read(
2305 		&aconnector->dm_dp_aux.aux,
2306 		dpcd_addr,
2307 		esi,
2308 		dpcd_bytes_to_read);
2309 
2310 	while (dret == dpcd_bytes_to_read &&
2311 		process_count < max_process_count) {
2312 		uint8_t retry;
2313 		dret = 0;
2314 
2315 		process_count++;
2316 
2317 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2318 		/* handle HPD short pulse irq */
2319 		if (aconnector->mst_mgr.mst_state)
2320 			drm_dp_mst_hpd_irq(
2321 				&aconnector->mst_mgr,
2322 				esi,
2323 				&new_irq_handled);
2324 
2325 		if (new_irq_handled) {
2326 			/* ACK at DPCD to notify down stream */
2327 			const int ack_dpcd_bytes_to_write =
2328 				dpcd_bytes_to_read - 1;
2329 
2330 			for (retry = 0; retry < 3; retry++) {
2331 				uint8_t wret;
2332 
2333 				wret = drm_dp_dpcd_write(
2334 					&aconnector->dm_dp_aux.aux,
2335 					dpcd_addr + 1,
2336 					&esi[1],
2337 					ack_dpcd_bytes_to_write);
2338 				if (wret == ack_dpcd_bytes_to_write)
2339 					break;
2340 			}
2341 
2342 			/* check if there is new irq to be handled */
2343 			dret = drm_dp_dpcd_read(
2344 				&aconnector->dm_dp_aux.aux,
2345 				dpcd_addr,
2346 				esi,
2347 				dpcd_bytes_to_read);
2348 
2349 			new_irq_handled = false;
2350 		} else {
2351 			break;
2352 		}
2353 	}
2354 
2355 	if (process_count == max_process_count)
2356 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2357 }
2358 
2359 static void handle_hpd_rx_irq(void *param)
2360 {
2361 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2362 	struct drm_connector *connector = &aconnector->base;
2363 	struct drm_device *dev = connector->dev;
2364 	struct dc_link *dc_link = aconnector->dc_link;
2365 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2366 	enum dc_connection_type new_connection_type = dc_connection_none;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 	union hpd_irq_data hpd_irq_data;
2369 	struct amdgpu_device *adev = dev->dev_private;
2370 
2371 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2372 #endif
2373 
2374 	/*
2375 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2376 	 * conflict, after implement i2c helper, this mutex should be
2377 	 * retired.
2378 	 */
2379 	if (dc_link->type != dc_connection_mst_branch)
2380 		mutex_lock(&aconnector->hpd_lock);
2381 
2382 
2383 #ifdef CONFIG_DRM_AMD_DC_HDCP
2384 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2385 #else
2386 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2387 #endif
2388 			!is_mst_root_connector) {
2389 		/* Downstream Port status changed. */
2390 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2391 			DRM_ERROR("KMS: Failed to detect connector\n");
2392 
2393 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2394 			emulated_link_detect(dc_link);
2395 
2396 			if (aconnector->fake_enable)
2397 				aconnector->fake_enable = false;
2398 
2399 			amdgpu_dm_update_connector_after_detect(aconnector);
2400 
2401 
2402 			drm_modeset_lock_all(dev);
2403 			dm_restore_drm_connector_state(dev, connector);
2404 			drm_modeset_unlock_all(dev);
2405 
2406 			drm_kms_helper_hotplug_event(dev);
2407 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2408 
2409 			if (aconnector->fake_enable)
2410 				aconnector->fake_enable = false;
2411 
2412 			amdgpu_dm_update_connector_after_detect(aconnector);
2413 
2414 
2415 			drm_modeset_lock_all(dev);
2416 			dm_restore_drm_connector_state(dev, connector);
2417 			drm_modeset_unlock_all(dev);
2418 
2419 			drm_kms_helper_hotplug_event(dev);
2420 		}
2421 	}
2422 #ifdef CONFIG_DRM_AMD_DC_HDCP
2423 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2424 		if (adev->dm.hdcp_workqueue)
2425 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2426 	}
2427 #endif
2428 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2429 	    (dc_link->type == dc_connection_mst_branch))
2430 		dm_handle_hpd_rx_irq(aconnector);
2431 
2432 	if (dc_link->type != dc_connection_mst_branch) {
2433 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2434 		mutex_unlock(&aconnector->hpd_lock);
2435 	}
2436 }
2437 
2438 static void register_hpd_handlers(struct amdgpu_device *adev)
2439 {
2440 	struct drm_device *dev = adev->ddev;
2441 	struct drm_connector *connector;
2442 	struct amdgpu_dm_connector *aconnector;
2443 	const struct dc_link *dc_link;
2444 	struct dc_interrupt_params int_params = {0};
2445 
2446 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2447 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2448 
2449 	list_for_each_entry(connector,
2450 			&dev->mode_config.connector_list, head)	{
2451 
2452 		aconnector = to_amdgpu_dm_connector(connector);
2453 		dc_link = aconnector->dc_link;
2454 
2455 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2456 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2457 			int_params.irq_source = dc_link->irq_source_hpd;
2458 
2459 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2460 					handle_hpd_irq,
2461 					(void *) aconnector);
2462 		}
2463 
2464 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2465 
2466 			/* Also register for DP short pulse (hpd_rx). */
2467 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2468 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2469 
2470 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2471 					handle_hpd_rx_irq,
2472 					(void *) aconnector);
2473 		}
2474 	}
2475 }
2476 
2477 /* Register IRQ sources and initialize IRQ callbacks */
2478 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2479 {
2480 	struct dc *dc = adev->dm.dc;
2481 	struct common_irq_params *c_irq_params;
2482 	struct dc_interrupt_params int_params = {0};
2483 	int r;
2484 	int i;
2485 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2486 
2487 	if (adev->asic_type >= CHIP_VEGA10)
2488 		client_id = SOC15_IH_CLIENTID_DCE;
2489 
2490 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2492 
2493 	/*
2494 	 * Actions of amdgpu_irq_add_id():
2495 	 * 1. Register a set() function with base driver.
2496 	 *    Base driver will call set() function to enable/disable an
2497 	 *    interrupt in DC hardware.
2498 	 * 2. Register amdgpu_dm_irq_handler().
2499 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2500 	 *    coming from DC hardware.
2501 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2502 	 *    for acknowledging and handling. */
2503 
2504 	/* Use VBLANK interrupt */
2505 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2506 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2507 		if (r) {
2508 			DRM_ERROR("Failed to add crtc irq id!\n");
2509 			return r;
2510 		}
2511 
2512 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2513 		int_params.irq_source =
2514 			dc_interrupt_to_irq_source(dc, i, 0);
2515 
2516 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2517 
2518 		c_irq_params->adev = adev;
2519 		c_irq_params->irq_src = int_params.irq_source;
2520 
2521 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2522 				dm_crtc_high_irq, c_irq_params);
2523 	}
2524 
2525 	/* Use VUPDATE interrupt */
2526 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2527 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2528 		if (r) {
2529 			DRM_ERROR("Failed to add vupdate irq id!\n");
2530 			return r;
2531 		}
2532 
2533 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2534 		int_params.irq_source =
2535 			dc_interrupt_to_irq_source(dc, i, 0);
2536 
2537 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2538 
2539 		c_irq_params->adev = adev;
2540 		c_irq_params->irq_src = int_params.irq_source;
2541 
2542 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2543 				dm_vupdate_high_irq, c_irq_params);
2544 	}
2545 
2546 	/* Use GRPH_PFLIP interrupt */
2547 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2548 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2549 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2550 		if (r) {
2551 			DRM_ERROR("Failed to add page flip irq id!\n");
2552 			return r;
2553 		}
2554 
2555 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2556 		int_params.irq_source =
2557 			dc_interrupt_to_irq_source(dc, i, 0);
2558 
2559 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2560 
2561 		c_irq_params->adev = adev;
2562 		c_irq_params->irq_src = int_params.irq_source;
2563 
2564 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2565 				dm_pflip_high_irq, c_irq_params);
2566 
2567 	}
2568 
2569 	/* HPD */
2570 	r = amdgpu_irq_add_id(adev, client_id,
2571 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2572 	if (r) {
2573 		DRM_ERROR("Failed to add hpd irq id!\n");
2574 		return r;
2575 	}
2576 
2577 	register_hpd_handlers(adev);
2578 
2579 	return 0;
2580 }
2581 
2582 #if defined(CONFIG_DRM_AMD_DC_DCN)
2583 /* Register IRQ sources and initialize IRQ callbacks */
2584 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2585 {
2586 	struct dc *dc = adev->dm.dc;
2587 	struct common_irq_params *c_irq_params;
2588 	struct dc_interrupt_params int_params = {0};
2589 	int r;
2590 	int i;
2591 
2592 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2594 
2595 	/*
2596 	 * Actions of amdgpu_irq_add_id():
2597 	 * 1. Register a set() function with base driver.
2598 	 *    Base driver will call set() function to enable/disable an
2599 	 *    interrupt in DC hardware.
2600 	 * 2. Register amdgpu_dm_irq_handler().
2601 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2602 	 *    coming from DC hardware.
2603 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2604 	 *    for acknowledging and handling.
2605 	 */
2606 
2607 	/* Use VSTARTUP interrupt */
2608 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2609 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2610 			i++) {
2611 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2612 
2613 		if (r) {
2614 			DRM_ERROR("Failed to add crtc irq id!\n");
2615 			return r;
2616 		}
2617 
2618 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2619 		int_params.irq_source =
2620 			dc_interrupt_to_irq_source(dc, i, 0);
2621 
2622 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2623 
2624 		c_irq_params->adev = adev;
2625 		c_irq_params->irq_src = int_params.irq_source;
2626 
2627 		amdgpu_dm_irq_register_interrupt(
2628 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2629 	}
2630 
2631 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2632 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2633 	 * to trigger at end of each vblank, regardless of state of the lock,
2634 	 * matching DCE behaviour.
2635 	 */
2636 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2637 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2638 	     i++) {
2639 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2640 
2641 		if (r) {
2642 			DRM_ERROR("Failed to add vupdate irq id!\n");
2643 			return r;
2644 		}
2645 
2646 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2647 		int_params.irq_source =
2648 			dc_interrupt_to_irq_source(dc, i, 0);
2649 
2650 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2651 
2652 		c_irq_params->adev = adev;
2653 		c_irq_params->irq_src = int_params.irq_source;
2654 
2655 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2656 				dm_vupdate_high_irq, c_irq_params);
2657 	}
2658 
2659 	/* Use GRPH_PFLIP interrupt */
2660 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2661 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2662 			i++) {
2663 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2664 		if (r) {
2665 			DRM_ERROR("Failed to add page flip irq id!\n");
2666 			return r;
2667 		}
2668 
2669 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2670 		int_params.irq_source =
2671 			dc_interrupt_to_irq_source(dc, i, 0);
2672 
2673 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2674 
2675 		c_irq_params->adev = adev;
2676 		c_irq_params->irq_src = int_params.irq_source;
2677 
2678 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2679 				dm_pflip_high_irq, c_irq_params);
2680 
2681 	}
2682 
2683 	/* HPD */
2684 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2685 			&adev->hpd_irq);
2686 	if (r) {
2687 		DRM_ERROR("Failed to add hpd irq id!\n");
2688 		return r;
2689 	}
2690 
2691 	register_hpd_handlers(adev);
2692 
2693 	return 0;
2694 }
2695 #endif
2696 
2697 /*
2698  * Acquires the lock for the atomic state object and returns
2699  * the new atomic state.
2700  *
2701  * This should only be called during atomic check.
2702  */
2703 static int dm_atomic_get_state(struct drm_atomic_state *state,
2704 			       struct dm_atomic_state **dm_state)
2705 {
2706 	struct drm_device *dev = state->dev;
2707 	struct amdgpu_device *adev = dev->dev_private;
2708 	struct amdgpu_display_manager *dm = &adev->dm;
2709 	struct drm_private_state *priv_state;
2710 
2711 	if (*dm_state)
2712 		return 0;
2713 
2714 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2715 	if (IS_ERR(priv_state))
2716 		return PTR_ERR(priv_state);
2717 
2718 	*dm_state = to_dm_atomic_state(priv_state);
2719 
2720 	return 0;
2721 }
2722 
2723 static struct dm_atomic_state *
2724 dm_atomic_get_new_state(struct drm_atomic_state *state)
2725 {
2726 	struct drm_device *dev = state->dev;
2727 	struct amdgpu_device *adev = dev->dev_private;
2728 	struct amdgpu_display_manager *dm = &adev->dm;
2729 	struct drm_private_obj *obj;
2730 	struct drm_private_state *new_obj_state;
2731 	int i;
2732 
2733 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2734 		if (obj->funcs == dm->atomic_obj.funcs)
2735 			return to_dm_atomic_state(new_obj_state);
2736 	}
2737 
2738 	return NULL;
2739 }
2740 
2741 static struct dm_atomic_state *
2742 dm_atomic_get_old_state(struct drm_atomic_state *state)
2743 {
2744 	struct drm_device *dev = state->dev;
2745 	struct amdgpu_device *adev = dev->dev_private;
2746 	struct amdgpu_display_manager *dm = &adev->dm;
2747 	struct drm_private_obj *obj;
2748 	struct drm_private_state *old_obj_state;
2749 	int i;
2750 
2751 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2752 		if (obj->funcs == dm->atomic_obj.funcs)
2753 			return to_dm_atomic_state(old_obj_state);
2754 	}
2755 
2756 	return NULL;
2757 }
2758 
2759 static struct drm_private_state *
2760 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2761 {
2762 	struct dm_atomic_state *old_state, *new_state;
2763 
2764 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2765 	if (!new_state)
2766 		return NULL;
2767 
2768 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2769 
2770 	old_state = to_dm_atomic_state(obj->state);
2771 
2772 	if (old_state && old_state->context)
2773 		new_state->context = dc_copy_state(old_state->context);
2774 
2775 	if (!new_state->context) {
2776 		kfree(new_state);
2777 		return NULL;
2778 	}
2779 
2780 	return &new_state->base;
2781 }
2782 
2783 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2784 				    struct drm_private_state *state)
2785 {
2786 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2787 
2788 	if (dm_state && dm_state->context)
2789 		dc_release_state(dm_state->context);
2790 
2791 	kfree(dm_state);
2792 }
2793 
2794 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2795 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2796 	.atomic_destroy_state = dm_atomic_destroy_state,
2797 };
2798 
2799 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2800 {
2801 	struct dm_atomic_state *state;
2802 	int r;
2803 
2804 	adev->mode_info.mode_config_initialized = true;
2805 
2806 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2807 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2808 
2809 	adev->ddev->mode_config.max_width = 16384;
2810 	adev->ddev->mode_config.max_height = 16384;
2811 
2812 	adev->ddev->mode_config.preferred_depth = 24;
2813 	adev->ddev->mode_config.prefer_shadow = 1;
2814 	/* indicates support for immediate flip */
2815 	adev->ddev->mode_config.async_page_flip = true;
2816 
2817 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2818 
2819 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2820 	if (!state)
2821 		return -ENOMEM;
2822 
2823 	state->context = dc_create_state(adev->dm.dc);
2824 	if (!state->context) {
2825 		kfree(state);
2826 		return -ENOMEM;
2827 	}
2828 
2829 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2830 
2831 	drm_atomic_private_obj_init(adev->ddev,
2832 				    &adev->dm.atomic_obj,
2833 				    &state->base,
2834 				    &dm_atomic_state_funcs);
2835 
2836 	r = amdgpu_display_modeset_create_props(adev);
2837 	if (r) {
2838 		dc_release_state(state->context);
2839 		kfree(state);
2840 		return r;
2841 	}
2842 
2843 	r = amdgpu_dm_audio_init(adev);
2844 	if (r) {
2845 		dc_release_state(state->context);
2846 		kfree(state);
2847 		return r;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2854 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2855 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2856 
2857 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2858 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2859 
2860 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2861 {
2862 #if defined(CONFIG_ACPI)
2863 	struct amdgpu_dm_backlight_caps caps;
2864 
2865 	memset(&caps, 0, sizeof(caps));
2866 
2867 	if (dm->backlight_caps.caps_valid)
2868 		return;
2869 
2870 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2871 	if (caps.caps_valid) {
2872 		dm->backlight_caps.caps_valid = true;
2873 		if (caps.aux_support)
2874 			return;
2875 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2876 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2877 	} else {
2878 		dm->backlight_caps.min_input_signal =
2879 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2880 		dm->backlight_caps.max_input_signal =
2881 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2882 	}
2883 #else
2884 	if (dm->backlight_caps.aux_support)
2885 		return;
2886 
2887 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2888 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2889 #endif
2890 }
2891 
2892 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2893 {
2894 	bool rc;
2895 
2896 	if (!link)
2897 		return 1;
2898 
2899 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2900 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2901 
2902 	return rc ? 0 : 1;
2903 }
2904 
2905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2906 				unsigned *min, unsigned *max)
2907 {
2908 	if (!caps)
2909 		return 0;
2910 
2911 	if (caps->aux_support) {
2912 		// Firmware limits are in nits, DC API wants millinits.
2913 		*max = 1000 * caps->aux_max_input_signal;
2914 		*min = 1000 * caps->aux_min_input_signal;
2915 	} else {
2916 		// Firmware limits are 8-bit, PWM control is 16-bit.
2917 		*max = 0x101 * caps->max_input_signal;
2918 		*min = 0x101 * caps->min_input_signal;
2919 	}
2920 	return 1;
2921 }
2922 
2923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2924 					uint32_t brightness)
2925 {
2926 	unsigned min, max;
2927 
2928 	if (!get_brightness_range(caps, &min, &max))
2929 		return brightness;
2930 
2931 	// Rescale 0..255 to min..max
2932 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
2933 				       AMDGPU_MAX_BL_LEVEL);
2934 }
2935 
2936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
2937 				      uint32_t brightness)
2938 {
2939 	unsigned min, max;
2940 
2941 	if (!get_brightness_range(caps, &min, &max))
2942 		return brightness;
2943 
2944 	if (brightness < min)
2945 		return 0;
2946 	// Rescale min..max to 0..255
2947 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
2948 				 max - min);
2949 }
2950 
2951 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2952 {
2953 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2954 	struct amdgpu_dm_backlight_caps caps;
2955 	struct dc_link *link = NULL;
2956 	u32 brightness;
2957 	bool rc;
2958 
2959 	amdgpu_dm_update_backlight_caps(dm);
2960 	caps = dm->backlight_caps;
2961 
2962 	link = (struct dc_link *)dm->backlight_link;
2963 
2964 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
2965 	// Change brightness based on AUX property
2966 	if (caps.aux_support)
2967 		return set_backlight_via_aux(link, brightness);
2968 
2969 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2970 
2971 	return rc ? 0 : 1;
2972 }
2973 
2974 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2975 {
2976 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2977 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2978 
2979 	if (ret == DC_ERROR_UNEXPECTED)
2980 		return bd->props.brightness;
2981 	return convert_brightness_to_user(&dm->backlight_caps, ret);
2982 }
2983 
2984 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2985 	.options = BL_CORE_SUSPENDRESUME,
2986 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2987 	.update_status	= amdgpu_dm_backlight_update_status,
2988 };
2989 
2990 static void
2991 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2992 {
2993 	char bl_name[16];
2994 	struct backlight_properties props = { 0 };
2995 
2996 	amdgpu_dm_update_backlight_caps(dm);
2997 
2998 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2999 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3000 	props.type = BACKLIGHT_RAW;
3001 
3002 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3003 			dm->adev->ddev->primary->index);
3004 
3005 	dm->backlight_dev = backlight_device_register(bl_name,
3006 			dm->adev->ddev->dev,
3007 			dm,
3008 			&amdgpu_dm_backlight_ops,
3009 			&props);
3010 
3011 	if (IS_ERR(dm->backlight_dev))
3012 		DRM_ERROR("DM: Backlight registration failed!\n");
3013 	else
3014 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3015 }
3016 
3017 #endif
3018 
3019 static int initialize_plane(struct amdgpu_display_manager *dm,
3020 			    struct amdgpu_mode_info *mode_info, int plane_id,
3021 			    enum drm_plane_type plane_type,
3022 			    const struct dc_plane_cap *plane_cap)
3023 {
3024 	struct drm_plane *plane;
3025 	unsigned long possible_crtcs;
3026 	int ret = 0;
3027 
3028 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3029 	if (!plane) {
3030 		DRM_ERROR("KMS: Failed to allocate plane\n");
3031 		return -ENOMEM;
3032 	}
3033 	plane->type = plane_type;
3034 
3035 	/*
3036 	 * HACK: IGT tests expect that the primary plane for a CRTC
3037 	 * can only have one possible CRTC. Only expose support for
3038 	 * any CRTC if they're not going to be used as a primary plane
3039 	 * for a CRTC - like overlay or underlay planes.
3040 	 */
3041 	possible_crtcs = 1 << plane_id;
3042 	if (plane_id >= dm->dc->caps.max_streams)
3043 		possible_crtcs = 0xff;
3044 
3045 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3046 
3047 	if (ret) {
3048 		DRM_ERROR("KMS: Failed to initialize plane\n");
3049 		kfree(plane);
3050 		return ret;
3051 	}
3052 
3053 	if (mode_info)
3054 		mode_info->planes[plane_id] = plane;
3055 
3056 	return ret;
3057 }
3058 
3059 
3060 static void register_backlight_device(struct amdgpu_display_manager *dm,
3061 				      struct dc_link *link)
3062 {
3063 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3064 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3065 
3066 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3067 	    link->type != dc_connection_none) {
3068 		/*
3069 		 * Event if registration failed, we should continue with
3070 		 * DM initialization because not having a backlight control
3071 		 * is better then a black screen.
3072 		 */
3073 		amdgpu_dm_register_backlight_device(dm);
3074 
3075 		if (dm->backlight_dev)
3076 			dm->backlight_link = link;
3077 	}
3078 #endif
3079 }
3080 
3081 
3082 /*
3083  * In this architecture, the association
3084  * connector -> encoder -> crtc
3085  * id not really requried. The crtc and connector will hold the
3086  * display_index as an abstraction to use with DAL component
3087  *
3088  * Returns 0 on success
3089  */
3090 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3091 {
3092 	struct amdgpu_display_manager *dm = &adev->dm;
3093 	int32_t i;
3094 	struct amdgpu_dm_connector *aconnector = NULL;
3095 	struct amdgpu_encoder *aencoder = NULL;
3096 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3097 	uint32_t link_cnt;
3098 	int32_t primary_planes;
3099 	enum dc_connection_type new_connection_type = dc_connection_none;
3100 	const struct dc_plane_cap *plane;
3101 
3102 	link_cnt = dm->dc->caps.max_links;
3103 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3104 		DRM_ERROR("DM: Failed to initialize mode config\n");
3105 		return -EINVAL;
3106 	}
3107 
3108 	/* There is one primary plane per CRTC */
3109 	primary_planes = dm->dc->caps.max_streams;
3110 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3111 
3112 	/*
3113 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3114 	 * Order is reversed to match iteration order in atomic check.
3115 	 */
3116 	for (i = (primary_planes - 1); i >= 0; i--) {
3117 		plane = &dm->dc->caps.planes[i];
3118 
3119 		if (initialize_plane(dm, mode_info, i,
3120 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3121 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3122 			goto fail;
3123 		}
3124 	}
3125 
3126 	/*
3127 	 * Initialize overlay planes, index starting after primary planes.
3128 	 * These planes have a higher DRM index than the primary planes since
3129 	 * they should be considered as having a higher z-order.
3130 	 * Order is reversed to match iteration order in atomic check.
3131 	 *
3132 	 * Only support DCN for now, and only expose one so we don't encourage
3133 	 * userspace to use up all the pipes.
3134 	 */
3135 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3136 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3137 
3138 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3139 			continue;
3140 
3141 		if (!plane->blends_with_above || !plane->blends_with_below)
3142 			continue;
3143 
3144 		if (!plane->pixel_format_support.argb8888)
3145 			continue;
3146 
3147 		if (initialize_plane(dm, NULL, primary_planes + i,
3148 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3149 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3150 			goto fail;
3151 		}
3152 
3153 		/* Only create one overlay plane. */
3154 		break;
3155 	}
3156 
3157 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3158 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3159 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3160 			goto fail;
3161 		}
3162 
3163 	dm->display_indexes_num = dm->dc->caps.max_streams;
3164 
3165 	/* loops over all connectors on the board */
3166 	for (i = 0; i < link_cnt; i++) {
3167 		struct dc_link *link = NULL;
3168 
3169 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3170 			DRM_ERROR(
3171 				"KMS: Cannot support more than %d display indexes\n",
3172 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3173 			continue;
3174 		}
3175 
3176 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3177 		if (!aconnector)
3178 			goto fail;
3179 
3180 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3181 		if (!aencoder)
3182 			goto fail;
3183 
3184 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3185 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3186 			goto fail;
3187 		}
3188 
3189 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3190 			DRM_ERROR("KMS: Failed to initialize connector\n");
3191 			goto fail;
3192 		}
3193 
3194 		link = dc_get_link_at_index(dm->dc, i);
3195 
3196 		if (!dc_link_detect_sink(link, &new_connection_type))
3197 			DRM_ERROR("KMS: Failed to detect connector\n");
3198 
3199 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3200 			emulated_link_detect(link);
3201 			amdgpu_dm_update_connector_after_detect(aconnector);
3202 
3203 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3204 			amdgpu_dm_update_connector_after_detect(aconnector);
3205 			register_backlight_device(dm, link);
3206 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3207 				amdgpu_dm_set_psr_caps(link);
3208 		}
3209 
3210 
3211 	}
3212 
3213 	/* Software is initialized. Now we can register interrupt handlers. */
3214 	switch (adev->asic_type) {
3215 	case CHIP_BONAIRE:
3216 	case CHIP_HAWAII:
3217 	case CHIP_KAVERI:
3218 	case CHIP_KABINI:
3219 	case CHIP_MULLINS:
3220 	case CHIP_TONGA:
3221 	case CHIP_FIJI:
3222 	case CHIP_CARRIZO:
3223 	case CHIP_STONEY:
3224 	case CHIP_POLARIS11:
3225 	case CHIP_POLARIS10:
3226 	case CHIP_POLARIS12:
3227 	case CHIP_VEGAM:
3228 	case CHIP_VEGA10:
3229 	case CHIP_VEGA12:
3230 	case CHIP_VEGA20:
3231 		if (dce110_register_irq_handlers(dm->adev)) {
3232 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3233 			goto fail;
3234 		}
3235 		break;
3236 #if defined(CONFIG_DRM_AMD_DC_DCN)
3237 	case CHIP_RAVEN:
3238 	case CHIP_NAVI12:
3239 	case CHIP_NAVI10:
3240 	case CHIP_NAVI14:
3241 	case CHIP_RENOIR:
3242 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3243 	case CHIP_SIENNA_CICHLID:
3244 	case CHIP_NAVY_FLOUNDER:
3245 #endif
3246 		if (dcn10_register_irq_handlers(dm->adev)) {
3247 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3248 			goto fail;
3249 		}
3250 		break;
3251 #endif
3252 	default:
3253 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3254 		goto fail;
3255 	}
3256 
3257 	/* No userspace support. */
3258 	dm->dc->debug.disable_tri_buf = true;
3259 
3260 	return 0;
3261 fail:
3262 	kfree(aencoder);
3263 	kfree(aconnector);
3264 
3265 	return -EINVAL;
3266 }
3267 
3268 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3269 {
3270 	drm_mode_config_cleanup(dm->ddev);
3271 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3272 	return;
3273 }
3274 
3275 /******************************************************************************
3276  * amdgpu_display_funcs functions
3277  *****************************************************************************/
3278 
3279 /*
3280  * dm_bandwidth_update - program display watermarks
3281  *
3282  * @adev: amdgpu_device pointer
3283  *
3284  * Calculate and program the display watermarks and line buffer allocation.
3285  */
3286 static void dm_bandwidth_update(struct amdgpu_device *adev)
3287 {
3288 	/* TODO: implement later */
3289 }
3290 
3291 static const struct amdgpu_display_funcs dm_display_funcs = {
3292 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3293 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3294 	.backlight_set_level = NULL, /* never called for DC */
3295 	.backlight_get_level = NULL, /* never called for DC */
3296 	.hpd_sense = NULL,/* called unconditionally */
3297 	.hpd_set_polarity = NULL, /* called unconditionally */
3298 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3299 	.page_flip_get_scanoutpos =
3300 		dm_crtc_get_scanoutpos,/* called unconditionally */
3301 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3302 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3303 };
3304 
3305 #if defined(CONFIG_DEBUG_KERNEL_DC)
3306 
3307 static ssize_t s3_debug_store(struct device *device,
3308 			      struct device_attribute *attr,
3309 			      const char *buf,
3310 			      size_t count)
3311 {
3312 	int ret;
3313 	int s3_state;
3314 	struct drm_device *drm_dev = dev_get_drvdata(device);
3315 	struct amdgpu_device *adev = drm_dev->dev_private;
3316 
3317 	ret = kstrtoint(buf, 0, &s3_state);
3318 
3319 	if (ret == 0) {
3320 		if (s3_state) {
3321 			dm_resume(adev);
3322 			drm_kms_helper_hotplug_event(adev->ddev);
3323 		} else
3324 			dm_suspend(adev);
3325 	}
3326 
3327 	return ret == 0 ? count : 0;
3328 }
3329 
3330 DEVICE_ATTR_WO(s3_debug);
3331 
3332 #endif
3333 
3334 static int dm_early_init(void *handle)
3335 {
3336 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3337 
3338 	switch (adev->asic_type) {
3339 	case CHIP_BONAIRE:
3340 	case CHIP_HAWAII:
3341 		adev->mode_info.num_crtc = 6;
3342 		adev->mode_info.num_hpd = 6;
3343 		adev->mode_info.num_dig = 6;
3344 		break;
3345 	case CHIP_KAVERI:
3346 		adev->mode_info.num_crtc = 4;
3347 		adev->mode_info.num_hpd = 6;
3348 		adev->mode_info.num_dig = 7;
3349 		break;
3350 	case CHIP_KABINI:
3351 	case CHIP_MULLINS:
3352 		adev->mode_info.num_crtc = 2;
3353 		adev->mode_info.num_hpd = 6;
3354 		adev->mode_info.num_dig = 6;
3355 		break;
3356 	case CHIP_FIJI:
3357 	case CHIP_TONGA:
3358 		adev->mode_info.num_crtc = 6;
3359 		adev->mode_info.num_hpd = 6;
3360 		adev->mode_info.num_dig = 7;
3361 		break;
3362 	case CHIP_CARRIZO:
3363 		adev->mode_info.num_crtc = 3;
3364 		adev->mode_info.num_hpd = 6;
3365 		adev->mode_info.num_dig = 9;
3366 		break;
3367 	case CHIP_STONEY:
3368 		adev->mode_info.num_crtc = 2;
3369 		adev->mode_info.num_hpd = 6;
3370 		adev->mode_info.num_dig = 9;
3371 		break;
3372 	case CHIP_POLARIS11:
3373 	case CHIP_POLARIS12:
3374 		adev->mode_info.num_crtc = 5;
3375 		adev->mode_info.num_hpd = 5;
3376 		adev->mode_info.num_dig = 5;
3377 		break;
3378 	case CHIP_POLARIS10:
3379 	case CHIP_VEGAM:
3380 		adev->mode_info.num_crtc = 6;
3381 		adev->mode_info.num_hpd = 6;
3382 		adev->mode_info.num_dig = 6;
3383 		break;
3384 	case CHIP_VEGA10:
3385 	case CHIP_VEGA12:
3386 	case CHIP_VEGA20:
3387 		adev->mode_info.num_crtc = 6;
3388 		adev->mode_info.num_hpd = 6;
3389 		adev->mode_info.num_dig = 6;
3390 		break;
3391 #if defined(CONFIG_DRM_AMD_DC_DCN)
3392 	case CHIP_RAVEN:
3393 		adev->mode_info.num_crtc = 4;
3394 		adev->mode_info.num_hpd = 4;
3395 		adev->mode_info.num_dig = 4;
3396 		break;
3397 #endif
3398 	case CHIP_NAVI10:
3399 	case CHIP_NAVI12:
3400 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3401 	case CHIP_SIENNA_CICHLID:
3402 	case CHIP_NAVY_FLOUNDER:
3403 #endif
3404 		adev->mode_info.num_crtc = 6;
3405 		adev->mode_info.num_hpd = 6;
3406 		adev->mode_info.num_dig = 6;
3407 		break;
3408 	case CHIP_NAVI14:
3409 		adev->mode_info.num_crtc = 5;
3410 		adev->mode_info.num_hpd = 5;
3411 		adev->mode_info.num_dig = 5;
3412 		break;
3413 	case CHIP_RENOIR:
3414 		adev->mode_info.num_crtc = 4;
3415 		adev->mode_info.num_hpd = 4;
3416 		adev->mode_info.num_dig = 4;
3417 		break;
3418 	default:
3419 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3420 		return -EINVAL;
3421 	}
3422 
3423 	amdgpu_dm_set_irq_funcs(adev);
3424 
3425 	if (adev->mode_info.funcs == NULL)
3426 		adev->mode_info.funcs = &dm_display_funcs;
3427 
3428 	/*
3429 	 * Note: Do NOT change adev->audio_endpt_rreg and
3430 	 * adev->audio_endpt_wreg because they are initialised in
3431 	 * amdgpu_device_init()
3432 	 */
3433 #if defined(CONFIG_DEBUG_KERNEL_DC)
3434 	device_create_file(
3435 		adev->ddev->dev,
3436 		&dev_attr_s3_debug);
3437 #endif
3438 
3439 	return 0;
3440 }
3441 
3442 static bool modeset_required(struct drm_crtc_state *crtc_state,
3443 			     struct dc_stream_state *new_stream,
3444 			     struct dc_stream_state *old_stream)
3445 {
3446 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3447 		return false;
3448 
3449 	if (!crtc_state->enable)
3450 		return false;
3451 
3452 	return crtc_state->active;
3453 }
3454 
3455 static bool modereset_required(struct drm_crtc_state *crtc_state)
3456 {
3457 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3458 		return false;
3459 
3460 	return !crtc_state->enable || !crtc_state->active;
3461 }
3462 
3463 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3464 {
3465 	drm_encoder_cleanup(encoder);
3466 	kfree(encoder);
3467 }
3468 
3469 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3470 	.destroy = amdgpu_dm_encoder_destroy,
3471 };
3472 
3473 
3474 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3475 				struct dc_scaling_info *scaling_info)
3476 {
3477 	int scale_w, scale_h;
3478 
3479 	memset(scaling_info, 0, sizeof(*scaling_info));
3480 
3481 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3482 	scaling_info->src_rect.x = state->src_x >> 16;
3483 	scaling_info->src_rect.y = state->src_y >> 16;
3484 
3485 	scaling_info->src_rect.width = state->src_w >> 16;
3486 	if (scaling_info->src_rect.width == 0)
3487 		return -EINVAL;
3488 
3489 	scaling_info->src_rect.height = state->src_h >> 16;
3490 	if (scaling_info->src_rect.height == 0)
3491 		return -EINVAL;
3492 
3493 	scaling_info->dst_rect.x = state->crtc_x;
3494 	scaling_info->dst_rect.y = state->crtc_y;
3495 
3496 	if (state->crtc_w == 0)
3497 		return -EINVAL;
3498 
3499 	scaling_info->dst_rect.width = state->crtc_w;
3500 
3501 	if (state->crtc_h == 0)
3502 		return -EINVAL;
3503 
3504 	scaling_info->dst_rect.height = state->crtc_h;
3505 
3506 	/* DRM doesn't specify clipping on destination output. */
3507 	scaling_info->clip_rect = scaling_info->dst_rect;
3508 
3509 	/* TODO: Validate scaling per-format with DC plane caps */
3510 	scale_w = scaling_info->dst_rect.width * 1000 /
3511 		  scaling_info->src_rect.width;
3512 
3513 	if (scale_w < 250 || scale_w > 16000)
3514 		return -EINVAL;
3515 
3516 	scale_h = scaling_info->dst_rect.height * 1000 /
3517 		  scaling_info->src_rect.height;
3518 
3519 	if (scale_h < 250 || scale_h > 16000)
3520 		return -EINVAL;
3521 
3522 	/*
3523 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3524 	 * assume reasonable defaults based on the format.
3525 	 */
3526 
3527 	return 0;
3528 }
3529 
3530 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3531 		       uint64_t *tiling_flags, bool *tmz_surface)
3532 {
3533 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3534 	int r = amdgpu_bo_reserve(rbo, false);
3535 
3536 	if (unlikely(r)) {
3537 		/* Don't show error message when returning -ERESTARTSYS */
3538 		if (r != -ERESTARTSYS)
3539 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3540 		return r;
3541 	}
3542 
3543 	if (tiling_flags)
3544 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3545 
3546 	if (tmz_surface)
3547 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3548 
3549 	amdgpu_bo_unreserve(rbo);
3550 
3551 	return r;
3552 }
3553 
3554 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3555 {
3556 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3557 
3558 	return offset ? (address + offset * 256) : 0;
3559 }
3560 
3561 static int
3562 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3563 			  const struct amdgpu_framebuffer *afb,
3564 			  const enum surface_pixel_format format,
3565 			  const enum dc_rotation_angle rotation,
3566 			  const struct plane_size *plane_size,
3567 			  const union dc_tiling_info *tiling_info,
3568 			  const uint64_t info,
3569 			  struct dc_plane_dcc_param *dcc,
3570 			  struct dc_plane_address *address,
3571 			  bool force_disable_dcc)
3572 {
3573 	struct dc *dc = adev->dm.dc;
3574 	struct dc_dcc_surface_param input;
3575 	struct dc_surface_dcc_cap output;
3576 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3577 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3578 	uint64_t dcc_address;
3579 
3580 	memset(&input, 0, sizeof(input));
3581 	memset(&output, 0, sizeof(output));
3582 
3583 	if (force_disable_dcc)
3584 		return 0;
3585 
3586 	if (!offset)
3587 		return 0;
3588 
3589 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3590 		return 0;
3591 
3592 	if (!dc->cap_funcs.get_dcc_compression_cap)
3593 		return -EINVAL;
3594 
3595 	input.format = format;
3596 	input.surface_size.width = plane_size->surface_size.width;
3597 	input.surface_size.height = plane_size->surface_size.height;
3598 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3599 
3600 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3601 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3602 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3603 		input.scan = SCAN_DIRECTION_VERTICAL;
3604 
3605 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3606 		return -EINVAL;
3607 
3608 	if (!output.capable)
3609 		return -EINVAL;
3610 
3611 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3612 		return -EINVAL;
3613 
3614 	dcc->enable = 1;
3615 	dcc->meta_pitch =
3616 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3617 	dcc->independent_64b_blks = i64b;
3618 
3619 	dcc_address = get_dcc_address(afb->address, info);
3620 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3621 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3622 
3623 	return 0;
3624 }
3625 
3626 static int
3627 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3628 			     const struct amdgpu_framebuffer *afb,
3629 			     const enum surface_pixel_format format,
3630 			     const enum dc_rotation_angle rotation,
3631 			     const uint64_t tiling_flags,
3632 			     union dc_tiling_info *tiling_info,
3633 			     struct plane_size *plane_size,
3634 			     struct dc_plane_dcc_param *dcc,
3635 			     struct dc_plane_address *address,
3636 			     bool tmz_surface,
3637 			     bool force_disable_dcc)
3638 {
3639 	const struct drm_framebuffer *fb = &afb->base;
3640 	int ret;
3641 
3642 	memset(tiling_info, 0, sizeof(*tiling_info));
3643 	memset(plane_size, 0, sizeof(*plane_size));
3644 	memset(dcc, 0, sizeof(*dcc));
3645 	memset(address, 0, sizeof(*address));
3646 
3647 	address->tmz_surface = tmz_surface;
3648 
3649 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3650 		plane_size->surface_size.x = 0;
3651 		plane_size->surface_size.y = 0;
3652 		plane_size->surface_size.width = fb->width;
3653 		plane_size->surface_size.height = fb->height;
3654 		plane_size->surface_pitch =
3655 			fb->pitches[0] / fb->format->cpp[0];
3656 
3657 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3658 		address->grph.addr.low_part = lower_32_bits(afb->address);
3659 		address->grph.addr.high_part = upper_32_bits(afb->address);
3660 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3661 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3662 
3663 		plane_size->surface_size.x = 0;
3664 		plane_size->surface_size.y = 0;
3665 		plane_size->surface_size.width = fb->width;
3666 		plane_size->surface_size.height = fb->height;
3667 		plane_size->surface_pitch =
3668 			fb->pitches[0] / fb->format->cpp[0];
3669 
3670 		plane_size->chroma_size.x = 0;
3671 		plane_size->chroma_size.y = 0;
3672 		/* TODO: set these based on surface format */
3673 		plane_size->chroma_size.width = fb->width / 2;
3674 		plane_size->chroma_size.height = fb->height / 2;
3675 
3676 		plane_size->chroma_pitch =
3677 			fb->pitches[1] / fb->format->cpp[1];
3678 
3679 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3680 		address->video_progressive.luma_addr.low_part =
3681 			lower_32_bits(afb->address);
3682 		address->video_progressive.luma_addr.high_part =
3683 			upper_32_bits(afb->address);
3684 		address->video_progressive.chroma_addr.low_part =
3685 			lower_32_bits(chroma_addr);
3686 		address->video_progressive.chroma_addr.high_part =
3687 			upper_32_bits(chroma_addr);
3688 	}
3689 
3690 	/* Fill GFX8 params */
3691 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3692 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3693 
3694 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3695 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3696 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3697 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3698 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3699 
3700 		/* XXX fix me for VI */
3701 		tiling_info->gfx8.num_banks = num_banks;
3702 		tiling_info->gfx8.array_mode =
3703 				DC_ARRAY_2D_TILED_THIN1;
3704 		tiling_info->gfx8.tile_split = tile_split;
3705 		tiling_info->gfx8.bank_width = bankw;
3706 		tiling_info->gfx8.bank_height = bankh;
3707 		tiling_info->gfx8.tile_aspect = mtaspect;
3708 		tiling_info->gfx8.tile_mode =
3709 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3710 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3711 			== DC_ARRAY_1D_TILED_THIN1) {
3712 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3713 	}
3714 
3715 	tiling_info->gfx8.pipe_config =
3716 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3717 
3718 	if (adev->asic_type == CHIP_VEGA10 ||
3719 	    adev->asic_type == CHIP_VEGA12 ||
3720 	    adev->asic_type == CHIP_VEGA20 ||
3721 	    adev->asic_type == CHIP_NAVI10 ||
3722 	    adev->asic_type == CHIP_NAVI14 ||
3723 	    adev->asic_type == CHIP_NAVI12 ||
3724 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3725 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3726 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3727 #endif
3728 	    adev->asic_type == CHIP_RENOIR ||
3729 	    adev->asic_type == CHIP_RAVEN) {
3730 		/* Fill GFX9 params */
3731 		tiling_info->gfx9.num_pipes =
3732 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3733 		tiling_info->gfx9.num_banks =
3734 			adev->gfx.config.gb_addr_config_fields.num_banks;
3735 		tiling_info->gfx9.pipe_interleave =
3736 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3737 		tiling_info->gfx9.num_shader_engines =
3738 			adev->gfx.config.gb_addr_config_fields.num_se;
3739 		tiling_info->gfx9.max_compressed_frags =
3740 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3741 		tiling_info->gfx9.num_rb_per_se =
3742 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3743 		tiling_info->gfx9.swizzle =
3744 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3745 		tiling_info->gfx9.shaderEnable = 1;
3746 
3747 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3748 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3749 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3750 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3751 #endif
3752 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3753 						plane_size, tiling_info,
3754 						tiling_flags, dcc, address,
3755 						force_disable_dcc);
3756 		if (ret)
3757 			return ret;
3758 	}
3759 
3760 	return 0;
3761 }
3762 
3763 static void
3764 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3765 			       bool *per_pixel_alpha, bool *global_alpha,
3766 			       int *global_alpha_value)
3767 {
3768 	*per_pixel_alpha = false;
3769 	*global_alpha = false;
3770 	*global_alpha_value = 0xff;
3771 
3772 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3773 		return;
3774 
3775 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3776 		static const uint32_t alpha_formats[] = {
3777 			DRM_FORMAT_ARGB8888,
3778 			DRM_FORMAT_RGBA8888,
3779 			DRM_FORMAT_ABGR8888,
3780 		};
3781 		uint32_t format = plane_state->fb->format->format;
3782 		unsigned int i;
3783 
3784 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3785 			if (format == alpha_formats[i]) {
3786 				*per_pixel_alpha = true;
3787 				break;
3788 			}
3789 		}
3790 	}
3791 
3792 	if (plane_state->alpha < 0xffff) {
3793 		*global_alpha = true;
3794 		*global_alpha_value = plane_state->alpha >> 8;
3795 	}
3796 }
3797 
3798 static int
3799 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3800 			    const enum surface_pixel_format format,
3801 			    enum dc_color_space *color_space)
3802 {
3803 	bool full_range;
3804 
3805 	*color_space = COLOR_SPACE_SRGB;
3806 
3807 	/* DRM color properties only affect non-RGB formats. */
3808 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3809 		return 0;
3810 
3811 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3812 
3813 	switch (plane_state->color_encoding) {
3814 	case DRM_COLOR_YCBCR_BT601:
3815 		if (full_range)
3816 			*color_space = COLOR_SPACE_YCBCR601;
3817 		else
3818 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3819 		break;
3820 
3821 	case DRM_COLOR_YCBCR_BT709:
3822 		if (full_range)
3823 			*color_space = COLOR_SPACE_YCBCR709;
3824 		else
3825 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3826 		break;
3827 
3828 	case DRM_COLOR_YCBCR_BT2020:
3829 		if (full_range)
3830 			*color_space = COLOR_SPACE_2020_YCBCR;
3831 		else
3832 			return -EINVAL;
3833 		break;
3834 
3835 	default:
3836 		return -EINVAL;
3837 	}
3838 
3839 	return 0;
3840 }
3841 
3842 static int
3843 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3844 			    const struct drm_plane_state *plane_state,
3845 			    const uint64_t tiling_flags,
3846 			    struct dc_plane_info *plane_info,
3847 			    struct dc_plane_address *address,
3848 			    bool tmz_surface,
3849 			    bool force_disable_dcc)
3850 {
3851 	const struct drm_framebuffer *fb = plane_state->fb;
3852 	const struct amdgpu_framebuffer *afb =
3853 		to_amdgpu_framebuffer(plane_state->fb);
3854 	struct drm_format_name_buf format_name;
3855 	int ret;
3856 
3857 	memset(plane_info, 0, sizeof(*plane_info));
3858 
3859 	switch (fb->format->format) {
3860 	case DRM_FORMAT_C8:
3861 		plane_info->format =
3862 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3863 		break;
3864 	case DRM_FORMAT_RGB565:
3865 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3866 		break;
3867 	case DRM_FORMAT_XRGB8888:
3868 	case DRM_FORMAT_ARGB8888:
3869 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3870 		break;
3871 	case DRM_FORMAT_XRGB2101010:
3872 	case DRM_FORMAT_ARGB2101010:
3873 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3874 		break;
3875 	case DRM_FORMAT_XBGR2101010:
3876 	case DRM_FORMAT_ABGR2101010:
3877 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3878 		break;
3879 	case DRM_FORMAT_XBGR8888:
3880 	case DRM_FORMAT_ABGR8888:
3881 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3882 		break;
3883 	case DRM_FORMAT_NV21:
3884 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3885 		break;
3886 	case DRM_FORMAT_NV12:
3887 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3888 		break;
3889 	case DRM_FORMAT_P010:
3890 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3891 		break;
3892 	case DRM_FORMAT_XRGB16161616F:
3893 	case DRM_FORMAT_ARGB16161616F:
3894 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3895 		break;
3896 	case DRM_FORMAT_XBGR16161616F:
3897 	case DRM_FORMAT_ABGR16161616F:
3898 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3899 		break;
3900 	default:
3901 		DRM_ERROR(
3902 			"Unsupported screen format %s\n",
3903 			drm_get_format_name(fb->format->format, &format_name));
3904 		return -EINVAL;
3905 	}
3906 
3907 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3908 	case DRM_MODE_ROTATE_0:
3909 		plane_info->rotation = ROTATION_ANGLE_0;
3910 		break;
3911 	case DRM_MODE_ROTATE_90:
3912 		plane_info->rotation = ROTATION_ANGLE_90;
3913 		break;
3914 	case DRM_MODE_ROTATE_180:
3915 		plane_info->rotation = ROTATION_ANGLE_180;
3916 		break;
3917 	case DRM_MODE_ROTATE_270:
3918 		plane_info->rotation = ROTATION_ANGLE_270;
3919 		break;
3920 	default:
3921 		plane_info->rotation = ROTATION_ANGLE_0;
3922 		break;
3923 	}
3924 
3925 	plane_info->visible = true;
3926 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3927 
3928 	plane_info->layer_index = 0;
3929 
3930 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3931 					  &plane_info->color_space);
3932 	if (ret)
3933 		return ret;
3934 
3935 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3936 					   plane_info->rotation, tiling_flags,
3937 					   &plane_info->tiling_info,
3938 					   &plane_info->plane_size,
3939 					   &plane_info->dcc, address, tmz_surface,
3940 					   force_disable_dcc);
3941 	if (ret)
3942 		return ret;
3943 
3944 	fill_blending_from_plane_state(
3945 		plane_state, &plane_info->per_pixel_alpha,
3946 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3947 
3948 	return 0;
3949 }
3950 
3951 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3952 				    struct dc_plane_state *dc_plane_state,
3953 				    struct drm_plane_state *plane_state,
3954 				    struct drm_crtc_state *crtc_state)
3955 {
3956 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3957 	const struct amdgpu_framebuffer *amdgpu_fb =
3958 		to_amdgpu_framebuffer(plane_state->fb);
3959 	struct dc_scaling_info scaling_info;
3960 	struct dc_plane_info plane_info;
3961 	uint64_t tiling_flags;
3962 	int ret;
3963 	bool tmz_surface = false;
3964 	bool force_disable_dcc = false;
3965 
3966 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3967 	if (ret)
3968 		return ret;
3969 
3970 	dc_plane_state->src_rect = scaling_info.src_rect;
3971 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3972 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3973 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3974 
3975 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3976 	if (ret)
3977 		return ret;
3978 
3979 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3980 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3981 					  &plane_info,
3982 					  &dc_plane_state->address,
3983 					  tmz_surface,
3984 					  force_disable_dcc);
3985 	if (ret)
3986 		return ret;
3987 
3988 	dc_plane_state->format = plane_info.format;
3989 	dc_plane_state->color_space = plane_info.color_space;
3990 	dc_plane_state->format = plane_info.format;
3991 	dc_plane_state->plane_size = plane_info.plane_size;
3992 	dc_plane_state->rotation = plane_info.rotation;
3993 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3994 	dc_plane_state->stereo_format = plane_info.stereo_format;
3995 	dc_plane_state->tiling_info = plane_info.tiling_info;
3996 	dc_plane_state->visible = plane_info.visible;
3997 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3998 	dc_plane_state->global_alpha = plane_info.global_alpha;
3999 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4000 	dc_plane_state->dcc = plane_info.dcc;
4001 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4002 
4003 	/*
4004 	 * Always set input transfer function, since plane state is refreshed
4005 	 * every time.
4006 	 */
4007 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4008 	if (ret)
4009 		return ret;
4010 
4011 	return 0;
4012 }
4013 
4014 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4015 					   const struct dm_connector_state *dm_state,
4016 					   struct dc_stream_state *stream)
4017 {
4018 	enum amdgpu_rmx_type rmx_type;
4019 
4020 	struct rect src = { 0 }; /* viewport in composition space*/
4021 	struct rect dst = { 0 }; /* stream addressable area */
4022 
4023 	/* no mode. nothing to be done */
4024 	if (!mode)
4025 		return;
4026 
4027 	/* Full screen scaling by default */
4028 	src.width = mode->hdisplay;
4029 	src.height = mode->vdisplay;
4030 	dst.width = stream->timing.h_addressable;
4031 	dst.height = stream->timing.v_addressable;
4032 
4033 	if (dm_state) {
4034 		rmx_type = dm_state->scaling;
4035 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4036 			if (src.width * dst.height <
4037 					src.height * dst.width) {
4038 				/* height needs less upscaling/more downscaling */
4039 				dst.width = src.width *
4040 						dst.height / src.height;
4041 			} else {
4042 				/* width needs less upscaling/more downscaling */
4043 				dst.height = src.height *
4044 						dst.width / src.width;
4045 			}
4046 		} else if (rmx_type == RMX_CENTER) {
4047 			dst = src;
4048 		}
4049 
4050 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4051 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4052 
4053 		if (dm_state->underscan_enable) {
4054 			dst.x += dm_state->underscan_hborder / 2;
4055 			dst.y += dm_state->underscan_vborder / 2;
4056 			dst.width -= dm_state->underscan_hborder;
4057 			dst.height -= dm_state->underscan_vborder;
4058 		}
4059 	}
4060 
4061 	stream->src = src;
4062 	stream->dst = dst;
4063 
4064 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4065 			dst.x, dst.y, dst.width, dst.height);
4066 
4067 }
4068 
4069 static enum dc_color_depth
4070 convert_color_depth_from_display_info(const struct drm_connector *connector,
4071 				      bool is_y420, int requested_bpc)
4072 {
4073 	uint8_t bpc;
4074 
4075 	if (is_y420) {
4076 		bpc = 8;
4077 
4078 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4079 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4080 			bpc = 16;
4081 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4082 			bpc = 12;
4083 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4084 			bpc = 10;
4085 	} else {
4086 		bpc = (uint8_t)connector->display_info.bpc;
4087 		/* Assume 8 bpc by default if no bpc is specified. */
4088 		bpc = bpc ? bpc : 8;
4089 	}
4090 
4091 	if (requested_bpc > 0) {
4092 		/*
4093 		 * Cap display bpc based on the user requested value.
4094 		 *
4095 		 * The value for state->max_bpc may not correctly updated
4096 		 * depending on when the connector gets added to the state
4097 		 * or if this was called outside of atomic check, so it
4098 		 * can't be used directly.
4099 		 */
4100 		bpc = min_t(u8, bpc, requested_bpc);
4101 
4102 		/* Round down to the nearest even number. */
4103 		bpc = bpc - (bpc & 1);
4104 	}
4105 
4106 	switch (bpc) {
4107 	case 0:
4108 		/*
4109 		 * Temporary Work around, DRM doesn't parse color depth for
4110 		 * EDID revision before 1.4
4111 		 * TODO: Fix edid parsing
4112 		 */
4113 		return COLOR_DEPTH_888;
4114 	case 6:
4115 		return COLOR_DEPTH_666;
4116 	case 8:
4117 		return COLOR_DEPTH_888;
4118 	case 10:
4119 		return COLOR_DEPTH_101010;
4120 	case 12:
4121 		return COLOR_DEPTH_121212;
4122 	case 14:
4123 		return COLOR_DEPTH_141414;
4124 	case 16:
4125 		return COLOR_DEPTH_161616;
4126 	default:
4127 		return COLOR_DEPTH_UNDEFINED;
4128 	}
4129 }
4130 
4131 static enum dc_aspect_ratio
4132 get_aspect_ratio(const struct drm_display_mode *mode_in)
4133 {
4134 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4135 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4136 }
4137 
4138 static enum dc_color_space
4139 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4140 {
4141 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4142 
4143 	switch (dc_crtc_timing->pixel_encoding)	{
4144 	case PIXEL_ENCODING_YCBCR422:
4145 	case PIXEL_ENCODING_YCBCR444:
4146 	case PIXEL_ENCODING_YCBCR420:
4147 	{
4148 		/*
4149 		 * 27030khz is the separation point between HDTV and SDTV
4150 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4151 		 * respectively
4152 		 */
4153 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4154 			if (dc_crtc_timing->flags.Y_ONLY)
4155 				color_space =
4156 					COLOR_SPACE_YCBCR709_LIMITED;
4157 			else
4158 				color_space = COLOR_SPACE_YCBCR709;
4159 		} else {
4160 			if (dc_crtc_timing->flags.Y_ONLY)
4161 				color_space =
4162 					COLOR_SPACE_YCBCR601_LIMITED;
4163 			else
4164 				color_space = COLOR_SPACE_YCBCR601;
4165 		}
4166 
4167 	}
4168 	break;
4169 	case PIXEL_ENCODING_RGB:
4170 		color_space = COLOR_SPACE_SRGB;
4171 		break;
4172 
4173 	default:
4174 		WARN_ON(1);
4175 		break;
4176 	}
4177 
4178 	return color_space;
4179 }
4180 
4181 static bool adjust_colour_depth_from_display_info(
4182 	struct dc_crtc_timing *timing_out,
4183 	const struct drm_display_info *info)
4184 {
4185 	enum dc_color_depth depth = timing_out->display_color_depth;
4186 	int normalized_clk;
4187 	do {
4188 		normalized_clk = timing_out->pix_clk_100hz / 10;
4189 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4190 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4191 			normalized_clk /= 2;
4192 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4193 		switch (depth) {
4194 		case COLOR_DEPTH_888:
4195 			break;
4196 		case COLOR_DEPTH_101010:
4197 			normalized_clk = (normalized_clk * 30) / 24;
4198 			break;
4199 		case COLOR_DEPTH_121212:
4200 			normalized_clk = (normalized_clk * 36) / 24;
4201 			break;
4202 		case COLOR_DEPTH_161616:
4203 			normalized_clk = (normalized_clk * 48) / 24;
4204 			break;
4205 		default:
4206 			/* The above depths are the only ones valid for HDMI. */
4207 			return false;
4208 		}
4209 		if (normalized_clk <= info->max_tmds_clock) {
4210 			timing_out->display_color_depth = depth;
4211 			return true;
4212 		}
4213 	} while (--depth > COLOR_DEPTH_666);
4214 	return false;
4215 }
4216 
4217 static void fill_stream_properties_from_drm_display_mode(
4218 	struct dc_stream_state *stream,
4219 	const struct drm_display_mode *mode_in,
4220 	const struct drm_connector *connector,
4221 	const struct drm_connector_state *connector_state,
4222 	const struct dc_stream_state *old_stream,
4223 	int requested_bpc)
4224 {
4225 	struct dc_crtc_timing *timing_out = &stream->timing;
4226 	const struct drm_display_info *info = &connector->display_info;
4227 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4228 	struct hdmi_vendor_infoframe hv_frame;
4229 	struct hdmi_avi_infoframe avi_frame;
4230 
4231 	memset(&hv_frame, 0, sizeof(hv_frame));
4232 	memset(&avi_frame, 0, sizeof(avi_frame));
4233 
4234 	timing_out->h_border_left = 0;
4235 	timing_out->h_border_right = 0;
4236 	timing_out->v_border_top = 0;
4237 	timing_out->v_border_bottom = 0;
4238 	/* TODO: un-hardcode */
4239 	if (drm_mode_is_420_only(info, mode_in)
4240 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4241 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4242 	else if (drm_mode_is_420_also(info, mode_in)
4243 			&& aconnector->force_yuv420_output)
4244 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4245 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4246 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4247 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4248 	else
4249 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4250 
4251 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4252 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4253 		connector,
4254 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4255 		requested_bpc);
4256 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4257 	timing_out->hdmi_vic = 0;
4258 
4259 	if(old_stream) {
4260 		timing_out->vic = old_stream->timing.vic;
4261 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4262 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4263 	} else {
4264 		timing_out->vic = drm_match_cea_mode(mode_in);
4265 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4266 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4267 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4268 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4269 	}
4270 
4271 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4272 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4273 		timing_out->vic = avi_frame.video_code;
4274 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4275 		timing_out->hdmi_vic = hv_frame.vic;
4276 	}
4277 
4278 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4279 	timing_out->h_total = mode_in->crtc_htotal;
4280 	timing_out->h_sync_width =
4281 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4282 	timing_out->h_front_porch =
4283 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4284 	timing_out->v_total = mode_in->crtc_vtotal;
4285 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4286 	timing_out->v_front_porch =
4287 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4288 	timing_out->v_sync_width =
4289 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4290 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4291 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4292 
4293 	stream->output_color_space = get_output_color_space(timing_out);
4294 
4295 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4296 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4297 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4298 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4299 		    drm_mode_is_420_also(info, mode_in) &&
4300 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4301 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4302 			adjust_colour_depth_from_display_info(timing_out, info);
4303 		}
4304 	}
4305 }
4306 
4307 static void fill_audio_info(struct audio_info *audio_info,
4308 			    const struct drm_connector *drm_connector,
4309 			    const struct dc_sink *dc_sink)
4310 {
4311 	int i = 0;
4312 	int cea_revision = 0;
4313 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4314 
4315 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4316 	audio_info->product_id = edid_caps->product_id;
4317 
4318 	cea_revision = drm_connector->display_info.cea_rev;
4319 
4320 	strscpy(audio_info->display_name,
4321 		edid_caps->display_name,
4322 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4323 
4324 	if (cea_revision >= 3) {
4325 		audio_info->mode_count = edid_caps->audio_mode_count;
4326 
4327 		for (i = 0; i < audio_info->mode_count; ++i) {
4328 			audio_info->modes[i].format_code =
4329 					(enum audio_format_code)
4330 					(edid_caps->audio_modes[i].format_code);
4331 			audio_info->modes[i].channel_count =
4332 					edid_caps->audio_modes[i].channel_count;
4333 			audio_info->modes[i].sample_rates.all =
4334 					edid_caps->audio_modes[i].sample_rate;
4335 			audio_info->modes[i].sample_size =
4336 					edid_caps->audio_modes[i].sample_size;
4337 		}
4338 	}
4339 
4340 	audio_info->flags.all = edid_caps->speaker_flags;
4341 
4342 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4343 	if (drm_connector->latency_present[0]) {
4344 		audio_info->video_latency = drm_connector->video_latency[0];
4345 		audio_info->audio_latency = drm_connector->audio_latency[0];
4346 	}
4347 
4348 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4349 
4350 }
4351 
4352 static void
4353 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4354 				      struct drm_display_mode *dst_mode)
4355 {
4356 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4357 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4358 	dst_mode->crtc_clock = src_mode->crtc_clock;
4359 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4360 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4361 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4362 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4363 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4364 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4365 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4366 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4367 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4368 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4369 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4370 }
4371 
4372 static void
4373 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4374 					const struct drm_display_mode *native_mode,
4375 					bool scale_enabled)
4376 {
4377 	if (scale_enabled) {
4378 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4379 	} else if (native_mode->clock == drm_mode->clock &&
4380 			native_mode->htotal == drm_mode->htotal &&
4381 			native_mode->vtotal == drm_mode->vtotal) {
4382 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4383 	} else {
4384 		/* no scaling nor amdgpu inserted, no need to patch */
4385 	}
4386 }
4387 
4388 static struct dc_sink *
4389 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4390 {
4391 	struct dc_sink_init_data sink_init_data = { 0 };
4392 	struct dc_sink *sink = NULL;
4393 	sink_init_data.link = aconnector->dc_link;
4394 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4395 
4396 	sink = dc_sink_create(&sink_init_data);
4397 	if (!sink) {
4398 		DRM_ERROR("Failed to create sink!\n");
4399 		return NULL;
4400 	}
4401 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4402 
4403 	return sink;
4404 }
4405 
4406 static void set_multisync_trigger_params(
4407 		struct dc_stream_state *stream)
4408 {
4409 	if (stream->triggered_crtc_reset.enabled) {
4410 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4411 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4412 	}
4413 }
4414 
4415 static void set_master_stream(struct dc_stream_state *stream_set[],
4416 			      int stream_count)
4417 {
4418 	int j, highest_rfr = 0, master_stream = 0;
4419 
4420 	for (j = 0;  j < stream_count; j++) {
4421 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4422 			int refresh_rate = 0;
4423 
4424 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4425 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4426 			if (refresh_rate > highest_rfr) {
4427 				highest_rfr = refresh_rate;
4428 				master_stream = j;
4429 			}
4430 		}
4431 	}
4432 	for (j = 0;  j < stream_count; j++) {
4433 		if (stream_set[j])
4434 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4435 	}
4436 }
4437 
4438 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4439 {
4440 	int i = 0;
4441 
4442 	if (context->stream_count < 2)
4443 		return;
4444 	for (i = 0; i < context->stream_count ; i++) {
4445 		if (!context->streams[i])
4446 			continue;
4447 		/*
4448 		 * TODO: add a function to read AMD VSDB bits and set
4449 		 * crtc_sync_master.multi_sync_enabled flag
4450 		 * For now it's set to false
4451 		 */
4452 		set_multisync_trigger_params(context->streams[i]);
4453 	}
4454 	set_master_stream(context->streams, context->stream_count);
4455 }
4456 
4457 static struct dc_stream_state *
4458 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4459 		       const struct drm_display_mode *drm_mode,
4460 		       const struct dm_connector_state *dm_state,
4461 		       const struct dc_stream_state *old_stream,
4462 		       int requested_bpc)
4463 {
4464 	struct drm_display_mode *preferred_mode = NULL;
4465 	struct drm_connector *drm_connector;
4466 	const struct drm_connector_state *con_state =
4467 		dm_state ? &dm_state->base : NULL;
4468 	struct dc_stream_state *stream = NULL;
4469 	struct drm_display_mode mode = *drm_mode;
4470 	bool native_mode_found = false;
4471 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4472 	int mode_refresh;
4473 	int preferred_refresh = 0;
4474 #if defined(CONFIG_DRM_AMD_DC_DCN)
4475 	struct dsc_dec_dpcd_caps dsc_caps;
4476 #endif
4477 	uint32_t link_bandwidth_kbps;
4478 
4479 	struct dc_sink *sink = NULL;
4480 	if (aconnector == NULL) {
4481 		DRM_ERROR("aconnector is NULL!\n");
4482 		return stream;
4483 	}
4484 
4485 	drm_connector = &aconnector->base;
4486 
4487 	if (!aconnector->dc_sink) {
4488 		sink = create_fake_sink(aconnector);
4489 		if (!sink)
4490 			return stream;
4491 	} else {
4492 		sink = aconnector->dc_sink;
4493 		dc_sink_retain(sink);
4494 	}
4495 
4496 	stream = dc_create_stream_for_sink(sink);
4497 
4498 	if (stream == NULL) {
4499 		DRM_ERROR("Failed to create stream for sink!\n");
4500 		goto finish;
4501 	}
4502 
4503 	stream->dm_stream_context = aconnector;
4504 
4505 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4506 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4507 
4508 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4509 		/* Search for preferred mode */
4510 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4511 			native_mode_found = true;
4512 			break;
4513 		}
4514 	}
4515 	if (!native_mode_found)
4516 		preferred_mode = list_first_entry_or_null(
4517 				&aconnector->base.modes,
4518 				struct drm_display_mode,
4519 				head);
4520 
4521 	mode_refresh = drm_mode_vrefresh(&mode);
4522 
4523 	if (preferred_mode == NULL) {
4524 		/*
4525 		 * This may not be an error, the use case is when we have no
4526 		 * usermode calls to reset and set mode upon hotplug. In this
4527 		 * case, we call set mode ourselves to restore the previous mode
4528 		 * and the modelist may not be filled in in time.
4529 		 */
4530 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4531 	} else {
4532 		decide_crtc_timing_for_drm_display_mode(
4533 				&mode, preferred_mode,
4534 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4535 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4536 	}
4537 
4538 	if (!dm_state)
4539 		drm_mode_set_crtcinfo(&mode, 0);
4540 
4541 	/*
4542 	* If scaling is enabled and refresh rate didn't change
4543 	* we copy the vic and polarities of the old timings
4544 	*/
4545 	if (!scale || mode_refresh != preferred_refresh)
4546 		fill_stream_properties_from_drm_display_mode(stream,
4547 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4548 	else
4549 		fill_stream_properties_from_drm_display_mode(stream,
4550 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4551 
4552 	stream->timing.flags.DSC = 0;
4553 
4554 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4555 #if defined(CONFIG_DRM_AMD_DC_DCN)
4556 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4557 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4558 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4559 				      &dsc_caps);
4560 #endif
4561 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4562 							     dc_link_get_link_cap(aconnector->dc_link));
4563 
4564 #if defined(CONFIG_DRM_AMD_DC_DCN)
4565 		if (dsc_caps.is_dsc_supported)
4566 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4567 						  &dsc_caps,
4568 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4569 						  link_bandwidth_kbps,
4570 						  &stream->timing,
4571 						  &stream->timing.dsc_cfg))
4572 				stream->timing.flags.DSC = 1;
4573 #endif
4574 	}
4575 
4576 	update_stream_scaling_settings(&mode, dm_state, stream);
4577 
4578 	fill_audio_info(
4579 		&stream->audio_info,
4580 		drm_connector,
4581 		sink);
4582 
4583 	update_stream_signal(stream, sink);
4584 
4585 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4586 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4587 	if (stream->link->psr_settings.psr_feature_enabled) {
4588 		//
4589 		// should decide stream support vsc sdp colorimetry capability
4590 		// before building vsc info packet
4591 		//
4592 		stream->use_vsc_sdp_for_colorimetry = false;
4593 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4594 			stream->use_vsc_sdp_for_colorimetry =
4595 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4596 		} else {
4597 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4598 				stream->use_vsc_sdp_for_colorimetry = true;
4599 		}
4600 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4601 	}
4602 finish:
4603 	dc_sink_release(sink);
4604 
4605 	return stream;
4606 }
4607 
4608 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4609 {
4610 	drm_crtc_cleanup(crtc);
4611 	kfree(crtc);
4612 }
4613 
4614 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4615 				  struct drm_crtc_state *state)
4616 {
4617 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4618 
4619 	/* TODO Destroy dc_stream objects are stream object is flattened */
4620 	if (cur->stream)
4621 		dc_stream_release(cur->stream);
4622 
4623 
4624 	__drm_atomic_helper_crtc_destroy_state(state);
4625 
4626 
4627 	kfree(state);
4628 }
4629 
4630 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4631 {
4632 	struct dm_crtc_state *state;
4633 
4634 	if (crtc->state)
4635 		dm_crtc_destroy_state(crtc, crtc->state);
4636 
4637 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4638 	if (WARN_ON(!state))
4639 		return;
4640 
4641 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4642 }
4643 
4644 static struct drm_crtc_state *
4645 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4646 {
4647 	struct dm_crtc_state *state, *cur;
4648 
4649 	cur = to_dm_crtc_state(crtc->state);
4650 
4651 	if (WARN_ON(!crtc->state))
4652 		return NULL;
4653 
4654 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4655 	if (!state)
4656 		return NULL;
4657 
4658 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4659 
4660 	if (cur->stream) {
4661 		state->stream = cur->stream;
4662 		dc_stream_retain(state->stream);
4663 	}
4664 
4665 	state->active_planes = cur->active_planes;
4666 	state->vrr_params = cur->vrr_params;
4667 	state->vrr_infopacket = cur->vrr_infopacket;
4668 	state->abm_level = cur->abm_level;
4669 	state->vrr_supported = cur->vrr_supported;
4670 	state->freesync_config = cur->freesync_config;
4671 	state->crc_src = cur->crc_src;
4672 	state->cm_has_degamma = cur->cm_has_degamma;
4673 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4674 
4675 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4676 
4677 	return &state->base;
4678 }
4679 
4680 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4681 {
4682 	enum dc_irq_source irq_source;
4683 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4684 	struct amdgpu_device *adev = crtc->dev->dev_private;
4685 	int rc;
4686 
4687 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4688 
4689 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4690 
4691 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4692 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4693 	return rc;
4694 }
4695 
4696 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4697 {
4698 	enum dc_irq_source irq_source;
4699 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4700 	struct amdgpu_device *adev = crtc->dev->dev_private;
4701 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4702 	int rc = 0;
4703 
4704 	if (enable) {
4705 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4706 		if (amdgpu_dm_vrr_active(acrtc_state))
4707 			rc = dm_set_vupdate_irq(crtc, true);
4708 	} else {
4709 		/* vblank irq off -> vupdate irq off */
4710 		rc = dm_set_vupdate_irq(crtc, false);
4711 	}
4712 
4713 	if (rc)
4714 		return rc;
4715 
4716 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4717 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4718 }
4719 
4720 static int dm_enable_vblank(struct drm_crtc *crtc)
4721 {
4722 	return dm_set_vblank(crtc, true);
4723 }
4724 
4725 static void dm_disable_vblank(struct drm_crtc *crtc)
4726 {
4727 	dm_set_vblank(crtc, false);
4728 }
4729 
4730 /* Implemented only the options currently availible for the driver */
4731 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4732 	.reset = dm_crtc_reset_state,
4733 	.destroy = amdgpu_dm_crtc_destroy,
4734 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4735 	.set_config = drm_atomic_helper_set_config,
4736 	.page_flip = drm_atomic_helper_page_flip,
4737 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4738 	.atomic_destroy_state = dm_crtc_destroy_state,
4739 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4740 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4741 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4742 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4743 	.enable_vblank = dm_enable_vblank,
4744 	.disable_vblank = dm_disable_vblank,
4745 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4746 };
4747 
4748 static enum drm_connector_status
4749 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4750 {
4751 	bool connected;
4752 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4753 
4754 	/*
4755 	 * Notes:
4756 	 * 1. This interface is NOT called in context of HPD irq.
4757 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4758 	 * makes it a bad place for *any* MST-related activity.
4759 	 */
4760 
4761 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4762 	    !aconnector->fake_enable)
4763 		connected = (aconnector->dc_sink != NULL);
4764 	else
4765 		connected = (aconnector->base.force == DRM_FORCE_ON);
4766 
4767 	return (connected ? connector_status_connected :
4768 			connector_status_disconnected);
4769 }
4770 
4771 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4772 					    struct drm_connector_state *connector_state,
4773 					    struct drm_property *property,
4774 					    uint64_t val)
4775 {
4776 	struct drm_device *dev = connector->dev;
4777 	struct amdgpu_device *adev = dev->dev_private;
4778 	struct dm_connector_state *dm_old_state =
4779 		to_dm_connector_state(connector->state);
4780 	struct dm_connector_state *dm_new_state =
4781 		to_dm_connector_state(connector_state);
4782 
4783 	int ret = -EINVAL;
4784 
4785 	if (property == dev->mode_config.scaling_mode_property) {
4786 		enum amdgpu_rmx_type rmx_type;
4787 
4788 		switch (val) {
4789 		case DRM_MODE_SCALE_CENTER:
4790 			rmx_type = RMX_CENTER;
4791 			break;
4792 		case DRM_MODE_SCALE_ASPECT:
4793 			rmx_type = RMX_ASPECT;
4794 			break;
4795 		case DRM_MODE_SCALE_FULLSCREEN:
4796 			rmx_type = RMX_FULL;
4797 			break;
4798 		case DRM_MODE_SCALE_NONE:
4799 		default:
4800 			rmx_type = RMX_OFF;
4801 			break;
4802 		}
4803 
4804 		if (dm_old_state->scaling == rmx_type)
4805 			return 0;
4806 
4807 		dm_new_state->scaling = rmx_type;
4808 		ret = 0;
4809 	} else if (property == adev->mode_info.underscan_hborder_property) {
4810 		dm_new_state->underscan_hborder = val;
4811 		ret = 0;
4812 	} else if (property == adev->mode_info.underscan_vborder_property) {
4813 		dm_new_state->underscan_vborder = val;
4814 		ret = 0;
4815 	} else if (property == adev->mode_info.underscan_property) {
4816 		dm_new_state->underscan_enable = val;
4817 		ret = 0;
4818 	} else if (property == adev->mode_info.abm_level_property) {
4819 		dm_new_state->abm_level = val;
4820 		ret = 0;
4821 	}
4822 
4823 	return ret;
4824 }
4825 
4826 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4827 					    const struct drm_connector_state *state,
4828 					    struct drm_property *property,
4829 					    uint64_t *val)
4830 {
4831 	struct drm_device *dev = connector->dev;
4832 	struct amdgpu_device *adev = dev->dev_private;
4833 	struct dm_connector_state *dm_state =
4834 		to_dm_connector_state(state);
4835 	int ret = -EINVAL;
4836 
4837 	if (property == dev->mode_config.scaling_mode_property) {
4838 		switch (dm_state->scaling) {
4839 		case RMX_CENTER:
4840 			*val = DRM_MODE_SCALE_CENTER;
4841 			break;
4842 		case RMX_ASPECT:
4843 			*val = DRM_MODE_SCALE_ASPECT;
4844 			break;
4845 		case RMX_FULL:
4846 			*val = DRM_MODE_SCALE_FULLSCREEN;
4847 			break;
4848 		case RMX_OFF:
4849 		default:
4850 			*val = DRM_MODE_SCALE_NONE;
4851 			break;
4852 		}
4853 		ret = 0;
4854 	} else if (property == adev->mode_info.underscan_hborder_property) {
4855 		*val = dm_state->underscan_hborder;
4856 		ret = 0;
4857 	} else if (property == adev->mode_info.underscan_vborder_property) {
4858 		*val = dm_state->underscan_vborder;
4859 		ret = 0;
4860 	} else if (property == adev->mode_info.underscan_property) {
4861 		*val = dm_state->underscan_enable;
4862 		ret = 0;
4863 	} else if (property == adev->mode_info.abm_level_property) {
4864 		*val = dm_state->abm_level;
4865 		ret = 0;
4866 	}
4867 
4868 	return ret;
4869 }
4870 
4871 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4872 {
4873 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4874 
4875 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4876 }
4877 
4878 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4879 {
4880 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4881 	const struct dc_link *link = aconnector->dc_link;
4882 	struct amdgpu_device *adev = connector->dev->dev_private;
4883 	struct amdgpu_display_manager *dm = &adev->dm;
4884 
4885 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4886 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4887 
4888 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4889 	    link->type != dc_connection_none &&
4890 	    dm->backlight_dev) {
4891 		backlight_device_unregister(dm->backlight_dev);
4892 		dm->backlight_dev = NULL;
4893 	}
4894 #endif
4895 
4896 	if (aconnector->dc_em_sink)
4897 		dc_sink_release(aconnector->dc_em_sink);
4898 	aconnector->dc_em_sink = NULL;
4899 	if (aconnector->dc_sink)
4900 		dc_sink_release(aconnector->dc_sink);
4901 	aconnector->dc_sink = NULL;
4902 
4903 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4904 	drm_connector_unregister(connector);
4905 	drm_connector_cleanup(connector);
4906 	if (aconnector->i2c) {
4907 		i2c_del_adapter(&aconnector->i2c->base);
4908 		kfree(aconnector->i2c);
4909 	}
4910 	kfree(aconnector->dm_dp_aux.aux.name);
4911 
4912 	kfree(connector);
4913 }
4914 
4915 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4916 {
4917 	struct dm_connector_state *state =
4918 		to_dm_connector_state(connector->state);
4919 
4920 	if (connector->state)
4921 		__drm_atomic_helper_connector_destroy_state(connector->state);
4922 
4923 	kfree(state);
4924 
4925 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4926 
4927 	if (state) {
4928 		state->scaling = RMX_OFF;
4929 		state->underscan_enable = false;
4930 		state->underscan_hborder = 0;
4931 		state->underscan_vborder = 0;
4932 		state->base.max_requested_bpc = 8;
4933 		state->vcpi_slots = 0;
4934 		state->pbn = 0;
4935 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4936 			state->abm_level = amdgpu_dm_abm_level;
4937 
4938 		__drm_atomic_helper_connector_reset(connector, &state->base);
4939 	}
4940 }
4941 
4942 struct drm_connector_state *
4943 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4944 {
4945 	struct dm_connector_state *state =
4946 		to_dm_connector_state(connector->state);
4947 
4948 	struct dm_connector_state *new_state =
4949 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4950 
4951 	if (!new_state)
4952 		return NULL;
4953 
4954 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4955 
4956 	new_state->freesync_capable = state->freesync_capable;
4957 	new_state->abm_level = state->abm_level;
4958 	new_state->scaling = state->scaling;
4959 	new_state->underscan_enable = state->underscan_enable;
4960 	new_state->underscan_hborder = state->underscan_hborder;
4961 	new_state->underscan_vborder = state->underscan_vborder;
4962 	new_state->vcpi_slots = state->vcpi_slots;
4963 	new_state->pbn = state->pbn;
4964 	return &new_state->base;
4965 }
4966 
4967 static int
4968 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4969 {
4970 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4971 		to_amdgpu_dm_connector(connector);
4972 	int r;
4973 
4974 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4975 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4976 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4977 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4978 		if (r)
4979 			return r;
4980 	}
4981 
4982 #if defined(CONFIG_DEBUG_FS)
4983 	connector_debugfs_init(amdgpu_dm_connector);
4984 #endif
4985 
4986 	return 0;
4987 }
4988 
4989 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4990 	.reset = amdgpu_dm_connector_funcs_reset,
4991 	.detect = amdgpu_dm_connector_detect,
4992 	.fill_modes = drm_helper_probe_single_connector_modes,
4993 	.destroy = amdgpu_dm_connector_destroy,
4994 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4995 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4996 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4997 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4998 	.late_register = amdgpu_dm_connector_late_register,
4999 	.early_unregister = amdgpu_dm_connector_unregister
5000 };
5001 
5002 static int get_modes(struct drm_connector *connector)
5003 {
5004 	return amdgpu_dm_connector_get_modes(connector);
5005 }
5006 
5007 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5008 {
5009 	struct dc_sink_init_data init_params = {
5010 			.link = aconnector->dc_link,
5011 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5012 	};
5013 	struct edid *edid;
5014 
5015 	if (!aconnector->base.edid_blob_ptr) {
5016 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5017 				aconnector->base.name);
5018 
5019 		aconnector->base.force = DRM_FORCE_OFF;
5020 		aconnector->base.override_edid = false;
5021 		return;
5022 	}
5023 
5024 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5025 
5026 	aconnector->edid = edid;
5027 
5028 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5029 		aconnector->dc_link,
5030 		(uint8_t *)edid,
5031 		(edid->extensions + 1) * EDID_LENGTH,
5032 		&init_params);
5033 
5034 	if (aconnector->base.force == DRM_FORCE_ON) {
5035 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5036 		aconnector->dc_link->local_sink :
5037 		aconnector->dc_em_sink;
5038 		dc_sink_retain(aconnector->dc_sink);
5039 	}
5040 }
5041 
5042 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5043 {
5044 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5045 
5046 	/*
5047 	 * In case of headless boot with force on for DP managed connector
5048 	 * Those settings have to be != 0 to get initial modeset
5049 	 */
5050 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5051 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5052 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5053 	}
5054 
5055 
5056 	aconnector->base.override_edid = true;
5057 	create_eml_sink(aconnector);
5058 }
5059 
5060 static struct dc_stream_state *
5061 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5062 				const struct drm_display_mode *drm_mode,
5063 				const struct dm_connector_state *dm_state,
5064 				const struct dc_stream_state *old_stream)
5065 {
5066 	struct drm_connector *connector = &aconnector->base;
5067 	struct amdgpu_device *adev = connector->dev->dev_private;
5068 	struct dc_stream_state *stream;
5069 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5070 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5071 	enum dc_status dc_result = DC_OK;
5072 
5073 	do {
5074 		stream = create_stream_for_sink(aconnector, drm_mode,
5075 						dm_state, old_stream,
5076 						requested_bpc);
5077 		if (stream == NULL) {
5078 			DRM_ERROR("Failed to create stream for sink!\n");
5079 			break;
5080 		}
5081 
5082 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5083 
5084 		if (dc_result != DC_OK) {
5085 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5086 				      drm_mode->hdisplay,
5087 				      drm_mode->vdisplay,
5088 				      drm_mode->clock,
5089 				      dc_result,
5090 				      dc_status_to_str(dc_result));
5091 
5092 			dc_stream_release(stream);
5093 			stream = NULL;
5094 			requested_bpc -= 2; /* lower bpc to retry validation */
5095 		}
5096 
5097 	} while (stream == NULL && requested_bpc >= 6);
5098 
5099 	return stream;
5100 }
5101 
5102 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5103 				   struct drm_display_mode *mode)
5104 {
5105 	int result = MODE_ERROR;
5106 	struct dc_sink *dc_sink;
5107 	/* TODO: Unhardcode stream count */
5108 	struct dc_stream_state *stream;
5109 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5110 
5111 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5112 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5113 		return result;
5114 
5115 	/*
5116 	 * Only run this the first time mode_valid is called to initilialize
5117 	 * EDID mgmt
5118 	 */
5119 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5120 		!aconnector->dc_em_sink)
5121 		handle_edid_mgmt(aconnector);
5122 
5123 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5124 
5125 	if (dc_sink == NULL) {
5126 		DRM_ERROR("dc_sink is NULL!\n");
5127 		goto fail;
5128 	}
5129 
5130 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5131 	if (stream) {
5132 		dc_stream_release(stream);
5133 		result = MODE_OK;
5134 	}
5135 
5136 fail:
5137 	/* TODO: error handling*/
5138 	return result;
5139 }
5140 
5141 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5142 				struct dc_info_packet *out)
5143 {
5144 	struct hdmi_drm_infoframe frame;
5145 	unsigned char buf[30]; /* 26 + 4 */
5146 	ssize_t len;
5147 	int ret, i;
5148 
5149 	memset(out, 0, sizeof(*out));
5150 
5151 	if (!state->hdr_output_metadata)
5152 		return 0;
5153 
5154 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5155 	if (ret)
5156 		return ret;
5157 
5158 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5159 	if (len < 0)
5160 		return (int)len;
5161 
5162 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5163 	if (len != 30)
5164 		return -EINVAL;
5165 
5166 	/* Prepare the infopacket for DC. */
5167 	switch (state->connector->connector_type) {
5168 	case DRM_MODE_CONNECTOR_HDMIA:
5169 		out->hb0 = 0x87; /* type */
5170 		out->hb1 = 0x01; /* version */
5171 		out->hb2 = 0x1A; /* length */
5172 		out->sb[0] = buf[3]; /* checksum */
5173 		i = 1;
5174 		break;
5175 
5176 	case DRM_MODE_CONNECTOR_DisplayPort:
5177 	case DRM_MODE_CONNECTOR_eDP:
5178 		out->hb0 = 0x00; /* sdp id, zero */
5179 		out->hb1 = 0x87; /* type */
5180 		out->hb2 = 0x1D; /* payload len - 1 */
5181 		out->hb3 = (0x13 << 2); /* sdp version */
5182 		out->sb[0] = 0x01; /* version */
5183 		out->sb[1] = 0x1A; /* length */
5184 		i = 2;
5185 		break;
5186 
5187 	default:
5188 		return -EINVAL;
5189 	}
5190 
5191 	memcpy(&out->sb[i], &buf[4], 26);
5192 	out->valid = true;
5193 
5194 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5195 		       sizeof(out->sb), false);
5196 
5197 	return 0;
5198 }
5199 
5200 static bool
5201 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5202 			  const struct drm_connector_state *new_state)
5203 {
5204 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5205 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5206 
5207 	if (old_blob != new_blob) {
5208 		if (old_blob && new_blob &&
5209 		    old_blob->length == new_blob->length)
5210 			return memcmp(old_blob->data, new_blob->data,
5211 				      old_blob->length);
5212 
5213 		return true;
5214 	}
5215 
5216 	return false;
5217 }
5218 
5219 static int
5220 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5221 				 struct drm_atomic_state *state)
5222 {
5223 	struct drm_connector_state *new_con_state =
5224 		drm_atomic_get_new_connector_state(state, conn);
5225 	struct drm_connector_state *old_con_state =
5226 		drm_atomic_get_old_connector_state(state, conn);
5227 	struct drm_crtc *crtc = new_con_state->crtc;
5228 	struct drm_crtc_state *new_crtc_state;
5229 	int ret;
5230 
5231 	if (!crtc)
5232 		return 0;
5233 
5234 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5235 		struct dc_info_packet hdr_infopacket;
5236 
5237 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5238 		if (ret)
5239 			return ret;
5240 
5241 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5242 		if (IS_ERR(new_crtc_state))
5243 			return PTR_ERR(new_crtc_state);
5244 
5245 		/*
5246 		 * DC considers the stream backends changed if the
5247 		 * static metadata changes. Forcing the modeset also
5248 		 * gives a simple way for userspace to switch from
5249 		 * 8bpc to 10bpc when setting the metadata to enter
5250 		 * or exit HDR.
5251 		 *
5252 		 * Changing the static metadata after it's been
5253 		 * set is permissible, however. So only force a
5254 		 * modeset if we're entering or exiting HDR.
5255 		 */
5256 		new_crtc_state->mode_changed =
5257 			!old_con_state->hdr_output_metadata ||
5258 			!new_con_state->hdr_output_metadata;
5259 	}
5260 
5261 	return 0;
5262 }
5263 
5264 static const struct drm_connector_helper_funcs
5265 amdgpu_dm_connector_helper_funcs = {
5266 	/*
5267 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5268 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5269 	 * are missing after user start lightdm. So we need to renew modes list.
5270 	 * in get_modes call back, not just return the modes count
5271 	 */
5272 	.get_modes = get_modes,
5273 	.mode_valid = amdgpu_dm_connector_mode_valid,
5274 	.atomic_check = amdgpu_dm_connector_atomic_check,
5275 };
5276 
5277 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5278 {
5279 }
5280 
5281 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5282 {
5283 	struct drm_atomic_state *state = new_crtc_state->state;
5284 	struct drm_plane *plane;
5285 	int num_active = 0;
5286 
5287 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5288 		struct drm_plane_state *new_plane_state;
5289 
5290 		/* Cursor planes are "fake". */
5291 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5292 			continue;
5293 
5294 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5295 
5296 		if (!new_plane_state) {
5297 			/*
5298 			 * The plane is enable on the CRTC and hasn't changed
5299 			 * state. This means that it previously passed
5300 			 * validation and is therefore enabled.
5301 			 */
5302 			num_active += 1;
5303 			continue;
5304 		}
5305 
5306 		/* We need a framebuffer to be considered enabled. */
5307 		num_active += (new_plane_state->fb != NULL);
5308 	}
5309 
5310 	return num_active;
5311 }
5312 
5313 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5314 					 struct drm_crtc_state *new_crtc_state)
5315 {
5316 	struct dm_crtc_state *dm_new_crtc_state =
5317 		to_dm_crtc_state(new_crtc_state);
5318 
5319 	dm_new_crtc_state->active_planes = 0;
5320 
5321 	if (!dm_new_crtc_state->stream)
5322 		return;
5323 
5324 	dm_new_crtc_state->active_planes =
5325 		count_crtc_active_planes(new_crtc_state);
5326 }
5327 
5328 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5329 				       struct drm_crtc_state *state)
5330 {
5331 	struct amdgpu_device *adev = crtc->dev->dev_private;
5332 	struct dc *dc = adev->dm.dc;
5333 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5334 	int ret = -EINVAL;
5335 
5336 	dm_update_crtc_active_planes(crtc, state);
5337 
5338 	if (unlikely(!dm_crtc_state->stream &&
5339 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5340 		WARN_ON(1);
5341 		return ret;
5342 	}
5343 
5344 	/*
5345 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5346 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5347 	 * planes are disabled, which is not supported by the hardware. And there is legacy
5348 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5349 	 */
5350 	if (state->enable &&
5351 	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
5352 		return -EINVAL;
5353 
5354 	/* In some use cases, like reset, no stream is attached */
5355 	if (!dm_crtc_state->stream)
5356 		return 0;
5357 
5358 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5359 		return 0;
5360 
5361 	return ret;
5362 }
5363 
5364 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5365 				      const struct drm_display_mode *mode,
5366 				      struct drm_display_mode *adjusted_mode)
5367 {
5368 	return true;
5369 }
5370 
5371 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5372 	.disable = dm_crtc_helper_disable,
5373 	.atomic_check = dm_crtc_helper_atomic_check,
5374 	.mode_fixup = dm_crtc_helper_mode_fixup,
5375 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5376 };
5377 
5378 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5379 {
5380 
5381 }
5382 
5383 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5384 {
5385 	switch (display_color_depth) {
5386 		case COLOR_DEPTH_666:
5387 			return 6;
5388 		case COLOR_DEPTH_888:
5389 			return 8;
5390 		case COLOR_DEPTH_101010:
5391 			return 10;
5392 		case COLOR_DEPTH_121212:
5393 			return 12;
5394 		case COLOR_DEPTH_141414:
5395 			return 14;
5396 		case COLOR_DEPTH_161616:
5397 			return 16;
5398 		default:
5399 			break;
5400 		}
5401 	return 0;
5402 }
5403 
5404 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5405 					  struct drm_crtc_state *crtc_state,
5406 					  struct drm_connector_state *conn_state)
5407 {
5408 	struct drm_atomic_state *state = crtc_state->state;
5409 	struct drm_connector *connector = conn_state->connector;
5410 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5411 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5412 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5413 	struct drm_dp_mst_topology_mgr *mst_mgr;
5414 	struct drm_dp_mst_port *mst_port;
5415 	enum dc_color_depth color_depth;
5416 	int clock, bpp = 0;
5417 	bool is_y420 = false;
5418 
5419 	if (!aconnector->port || !aconnector->dc_sink)
5420 		return 0;
5421 
5422 	mst_port = aconnector->port;
5423 	mst_mgr = &aconnector->mst_port->mst_mgr;
5424 
5425 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5426 		return 0;
5427 
5428 	if (!state->duplicated) {
5429 		int max_bpc = conn_state->max_requested_bpc;
5430 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5431 				aconnector->force_yuv420_output;
5432 		color_depth = convert_color_depth_from_display_info(connector,
5433 								    is_y420,
5434 								    max_bpc);
5435 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5436 		clock = adjusted_mode->clock;
5437 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5438 	}
5439 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5440 									   mst_mgr,
5441 									   mst_port,
5442 									   dm_new_connector_state->pbn,
5443 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5444 	if (dm_new_connector_state->vcpi_slots < 0) {
5445 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5446 		return dm_new_connector_state->vcpi_slots;
5447 	}
5448 	return 0;
5449 }
5450 
5451 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5452 	.disable = dm_encoder_helper_disable,
5453 	.atomic_check = dm_encoder_helper_atomic_check
5454 };
5455 
5456 #if defined(CONFIG_DRM_AMD_DC_DCN)
5457 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5458 					    struct dc_state *dc_state)
5459 {
5460 	struct dc_stream_state *stream = NULL;
5461 	struct drm_connector *connector;
5462 	struct drm_connector_state *new_con_state, *old_con_state;
5463 	struct amdgpu_dm_connector *aconnector;
5464 	struct dm_connector_state *dm_conn_state;
5465 	int i, j, clock, bpp;
5466 	int vcpi, pbn_div, pbn = 0;
5467 
5468 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5469 
5470 		aconnector = to_amdgpu_dm_connector(connector);
5471 
5472 		if (!aconnector->port)
5473 			continue;
5474 
5475 		if (!new_con_state || !new_con_state->crtc)
5476 			continue;
5477 
5478 		dm_conn_state = to_dm_connector_state(new_con_state);
5479 
5480 		for (j = 0; j < dc_state->stream_count; j++) {
5481 			stream = dc_state->streams[j];
5482 			if (!stream)
5483 				continue;
5484 
5485 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5486 				break;
5487 
5488 			stream = NULL;
5489 		}
5490 
5491 		if (!stream)
5492 			continue;
5493 
5494 		if (stream->timing.flags.DSC != 1) {
5495 			drm_dp_mst_atomic_enable_dsc(state,
5496 						     aconnector->port,
5497 						     dm_conn_state->pbn,
5498 						     0,
5499 						     false);
5500 			continue;
5501 		}
5502 
5503 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5504 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5505 		clock = stream->timing.pix_clk_100hz / 10;
5506 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5507 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5508 						    aconnector->port,
5509 						    pbn, pbn_div,
5510 						    true);
5511 		if (vcpi < 0)
5512 			return vcpi;
5513 
5514 		dm_conn_state->pbn = pbn;
5515 		dm_conn_state->vcpi_slots = vcpi;
5516 	}
5517 	return 0;
5518 }
5519 #endif
5520 
5521 static void dm_drm_plane_reset(struct drm_plane *plane)
5522 {
5523 	struct dm_plane_state *amdgpu_state = NULL;
5524 
5525 	if (plane->state)
5526 		plane->funcs->atomic_destroy_state(plane, plane->state);
5527 
5528 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5529 	WARN_ON(amdgpu_state == NULL);
5530 
5531 	if (amdgpu_state)
5532 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5533 }
5534 
5535 static struct drm_plane_state *
5536 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5537 {
5538 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5539 
5540 	old_dm_plane_state = to_dm_plane_state(plane->state);
5541 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5542 	if (!dm_plane_state)
5543 		return NULL;
5544 
5545 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5546 
5547 	if (old_dm_plane_state->dc_state) {
5548 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5549 		dc_plane_state_retain(dm_plane_state->dc_state);
5550 	}
5551 
5552 	return &dm_plane_state->base;
5553 }
5554 
5555 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5556 				struct drm_plane_state *state)
5557 {
5558 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5559 
5560 	if (dm_plane_state->dc_state)
5561 		dc_plane_state_release(dm_plane_state->dc_state);
5562 
5563 	drm_atomic_helper_plane_destroy_state(plane, state);
5564 }
5565 
5566 static const struct drm_plane_funcs dm_plane_funcs = {
5567 	.update_plane	= drm_atomic_helper_update_plane,
5568 	.disable_plane	= drm_atomic_helper_disable_plane,
5569 	.destroy	= drm_primary_helper_destroy,
5570 	.reset = dm_drm_plane_reset,
5571 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5572 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5573 };
5574 
5575 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5576 				      struct drm_plane_state *new_state)
5577 {
5578 	struct amdgpu_framebuffer *afb;
5579 	struct drm_gem_object *obj;
5580 	struct amdgpu_device *adev;
5581 	struct amdgpu_bo *rbo;
5582 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5583 	struct list_head list;
5584 	struct ttm_validate_buffer tv;
5585 	struct ww_acquire_ctx ticket;
5586 	uint64_t tiling_flags;
5587 	uint32_t domain;
5588 	int r;
5589 	bool tmz_surface = false;
5590 	bool force_disable_dcc = false;
5591 
5592 	dm_plane_state_old = to_dm_plane_state(plane->state);
5593 	dm_plane_state_new = to_dm_plane_state(new_state);
5594 
5595 	if (!new_state->fb) {
5596 		DRM_DEBUG_DRIVER("No FB bound\n");
5597 		return 0;
5598 	}
5599 
5600 	afb = to_amdgpu_framebuffer(new_state->fb);
5601 	obj = new_state->fb->obj[0];
5602 	rbo = gem_to_amdgpu_bo(obj);
5603 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5604 	INIT_LIST_HEAD(&list);
5605 
5606 	tv.bo = &rbo->tbo;
5607 	tv.num_shared = 1;
5608 	list_add(&tv.head, &list);
5609 
5610 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5611 	if (r) {
5612 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5613 		return r;
5614 	}
5615 
5616 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5617 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5618 	else
5619 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5620 
5621 	r = amdgpu_bo_pin(rbo, domain);
5622 	if (unlikely(r != 0)) {
5623 		if (r != -ERESTARTSYS)
5624 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5625 		ttm_eu_backoff_reservation(&ticket, &list);
5626 		return r;
5627 	}
5628 
5629 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5630 	if (unlikely(r != 0)) {
5631 		amdgpu_bo_unpin(rbo);
5632 		ttm_eu_backoff_reservation(&ticket, &list);
5633 		DRM_ERROR("%p bind failed\n", rbo);
5634 		return r;
5635 	}
5636 
5637 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5638 
5639 	tmz_surface = amdgpu_bo_encrypted(rbo);
5640 
5641 	ttm_eu_backoff_reservation(&ticket, &list);
5642 
5643 	afb->address = amdgpu_bo_gpu_offset(rbo);
5644 
5645 	amdgpu_bo_ref(rbo);
5646 
5647 	if (dm_plane_state_new->dc_state &&
5648 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5649 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5650 
5651 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5652 		fill_plane_buffer_attributes(
5653 			adev, afb, plane_state->format, plane_state->rotation,
5654 			tiling_flags, &plane_state->tiling_info,
5655 			&plane_state->plane_size, &plane_state->dcc,
5656 			&plane_state->address, tmz_surface,
5657 			force_disable_dcc);
5658 	}
5659 
5660 	return 0;
5661 }
5662 
5663 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5664 				       struct drm_plane_state *old_state)
5665 {
5666 	struct amdgpu_bo *rbo;
5667 	int r;
5668 
5669 	if (!old_state->fb)
5670 		return;
5671 
5672 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5673 	r = amdgpu_bo_reserve(rbo, false);
5674 	if (unlikely(r)) {
5675 		DRM_ERROR("failed to reserve rbo before unpin\n");
5676 		return;
5677 	}
5678 
5679 	amdgpu_bo_unpin(rbo);
5680 	amdgpu_bo_unreserve(rbo);
5681 	amdgpu_bo_unref(&rbo);
5682 }
5683 
5684 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5685 				       struct drm_crtc_state *new_crtc_state)
5686 {
5687 	int max_downscale = 0;
5688 	int max_upscale = INT_MAX;
5689 
5690 	/* TODO: These should be checked against DC plane caps */
5691 	return drm_atomic_helper_check_plane_state(
5692 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5693 }
5694 
5695 static int dm_plane_atomic_check(struct drm_plane *plane,
5696 				 struct drm_plane_state *state)
5697 {
5698 	struct amdgpu_device *adev = plane->dev->dev_private;
5699 	struct dc *dc = adev->dm.dc;
5700 	struct dm_plane_state *dm_plane_state;
5701 	struct dc_scaling_info scaling_info;
5702 	struct drm_crtc_state *new_crtc_state;
5703 	int ret;
5704 
5705 	dm_plane_state = to_dm_plane_state(state);
5706 
5707 	if (!dm_plane_state->dc_state)
5708 		return 0;
5709 
5710 	new_crtc_state =
5711 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5712 	if (!new_crtc_state)
5713 		return -EINVAL;
5714 
5715 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5716 	if (ret)
5717 		return ret;
5718 
5719 	ret = fill_dc_scaling_info(state, &scaling_info);
5720 	if (ret)
5721 		return ret;
5722 
5723 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5724 		return 0;
5725 
5726 	return -EINVAL;
5727 }
5728 
5729 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5730 				       struct drm_plane_state *new_plane_state)
5731 {
5732 	/* Only support async updates on cursor planes. */
5733 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5734 		return -EINVAL;
5735 
5736 	return 0;
5737 }
5738 
5739 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5740 					 struct drm_plane_state *new_state)
5741 {
5742 	struct drm_plane_state *old_state =
5743 		drm_atomic_get_old_plane_state(new_state->state, plane);
5744 
5745 	swap(plane->state->fb, new_state->fb);
5746 
5747 	plane->state->src_x = new_state->src_x;
5748 	plane->state->src_y = new_state->src_y;
5749 	plane->state->src_w = new_state->src_w;
5750 	plane->state->src_h = new_state->src_h;
5751 	plane->state->crtc_x = new_state->crtc_x;
5752 	plane->state->crtc_y = new_state->crtc_y;
5753 	plane->state->crtc_w = new_state->crtc_w;
5754 	plane->state->crtc_h = new_state->crtc_h;
5755 
5756 	handle_cursor_update(plane, old_state);
5757 }
5758 
5759 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5760 	.prepare_fb = dm_plane_helper_prepare_fb,
5761 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5762 	.atomic_check = dm_plane_atomic_check,
5763 	.atomic_async_check = dm_plane_atomic_async_check,
5764 	.atomic_async_update = dm_plane_atomic_async_update
5765 };
5766 
5767 /*
5768  * TODO: these are currently initialized to rgb formats only.
5769  * For future use cases we should either initialize them dynamically based on
5770  * plane capabilities, or initialize this array to all formats, so internal drm
5771  * check will succeed, and let DC implement proper check
5772  */
5773 static const uint32_t rgb_formats[] = {
5774 	DRM_FORMAT_XRGB8888,
5775 	DRM_FORMAT_ARGB8888,
5776 	DRM_FORMAT_RGBA8888,
5777 	DRM_FORMAT_XRGB2101010,
5778 	DRM_FORMAT_XBGR2101010,
5779 	DRM_FORMAT_ARGB2101010,
5780 	DRM_FORMAT_ABGR2101010,
5781 	DRM_FORMAT_XBGR8888,
5782 	DRM_FORMAT_ABGR8888,
5783 	DRM_FORMAT_RGB565,
5784 };
5785 
5786 static const uint32_t overlay_formats[] = {
5787 	DRM_FORMAT_XRGB8888,
5788 	DRM_FORMAT_ARGB8888,
5789 	DRM_FORMAT_RGBA8888,
5790 	DRM_FORMAT_XBGR8888,
5791 	DRM_FORMAT_ABGR8888,
5792 	DRM_FORMAT_RGB565
5793 };
5794 
5795 static const u32 cursor_formats[] = {
5796 	DRM_FORMAT_ARGB8888
5797 };
5798 
5799 static int get_plane_formats(const struct drm_plane *plane,
5800 			     const struct dc_plane_cap *plane_cap,
5801 			     uint32_t *formats, int max_formats)
5802 {
5803 	int i, num_formats = 0;
5804 
5805 	/*
5806 	 * TODO: Query support for each group of formats directly from
5807 	 * DC plane caps. This will require adding more formats to the
5808 	 * caps list.
5809 	 */
5810 
5811 	switch (plane->type) {
5812 	case DRM_PLANE_TYPE_PRIMARY:
5813 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5814 			if (num_formats >= max_formats)
5815 				break;
5816 
5817 			formats[num_formats++] = rgb_formats[i];
5818 		}
5819 
5820 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5821 			formats[num_formats++] = DRM_FORMAT_NV12;
5822 		if (plane_cap && plane_cap->pixel_format_support.p010)
5823 			formats[num_formats++] = DRM_FORMAT_P010;
5824 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5825 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5826 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5827 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5828 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5829 		}
5830 		break;
5831 
5832 	case DRM_PLANE_TYPE_OVERLAY:
5833 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5834 			if (num_formats >= max_formats)
5835 				break;
5836 
5837 			formats[num_formats++] = overlay_formats[i];
5838 		}
5839 		break;
5840 
5841 	case DRM_PLANE_TYPE_CURSOR:
5842 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5843 			if (num_formats >= max_formats)
5844 				break;
5845 
5846 			formats[num_formats++] = cursor_formats[i];
5847 		}
5848 		break;
5849 	}
5850 
5851 	return num_formats;
5852 }
5853 
5854 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5855 				struct drm_plane *plane,
5856 				unsigned long possible_crtcs,
5857 				const struct dc_plane_cap *plane_cap)
5858 {
5859 	uint32_t formats[32];
5860 	int num_formats;
5861 	int res = -EPERM;
5862 	unsigned int supported_rotations;
5863 
5864 	num_formats = get_plane_formats(plane, plane_cap, formats,
5865 					ARRAY_SIZE(formats));
5866 
5867 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5868 				       &dm_plane_funcs, formats, num_formats,
5869 				       NULL, plane->type, NULL);
5870 	if (res)
5871 		return res;
5872 
5873 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5874 	    plane_cap && plane_cap->per_pixel_alpha) {
5875 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5876 					  BIT(DRM_MODE_BLEND_PREMULTI);
5877 
5878 		drm_plane_create_alpha_property(plane);
5879 		drm_plane_create_blend_mode_property(plane, blend_caps);
5880 	}
5881 
5882 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5883 	    plane_cap &&
5884 	    (plane_cap->pixel_format_support.nv12 ||
5885 	     plane_cap->pixel_format_support.p010)) {
5886 		/* This only affects YUV formats. */
5887 		drm_plane_create_color_properties(
5888 			plane,
5889 			BIT(DRM_COLOR_YCBCR_BT601) |
5890 			BIT(DRM_COLOR_YCBCR_BT709) |
5891 			BIT(DRM_COLOR_YCBCR_BT2020),
5892 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5893 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5894 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5895 	}
5896 
5897 	supported_rotations =
5898 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5899 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5900 
5901 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5902 					   supported_rotations);
5903 
5904 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5905 
5906 	/* Create (reset) the plane state */
5907 	if (plane->funcs->reset)
5908 		plane->funcs->reset(plane);
5909 
5910 	return 0;
5911 }
5912 
5913 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5914 			       struct drm_plane *plane,
5915 			       uint32_t crtc_index)
5916 {
5917 	struct amdgpu_crtc *acrtc = NULL;
5918 	struct drm_plane *cursor_plane;
5919 
5920 	int res = -ENOMEM;
5921 
5922 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5923 	if (!cursor_plane)
5924 		goto fail;
5925 
5926 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5927 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5928 
5929 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5930 	if (!acrtc)
5931 		goto fail;
5932 
5933 	res = drm_crtc_init_with_planes(
5934 			dm->ddev,
5935 			&acrtc->base,
5936 			plane,
5937 			cursor_plane,
5938 			&amdgpu_dm_crtc_funcs, NULL);
5939 
5940 	if (res)
5941 		goto fail;
5942 
5943 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5944 
5945 	/* Create (reset) the plane state */
5946 	if (acrtc->base.funcs->reset)
5947 		acrtc->base.funcs->reset(&acrtc->base);
5948 
5949 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5950 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5951 
5952 	acrtc->crtc_id = crtc_index;
5953 	acrtc->base.enabled = false;
5954 	acrtc->otg_inst = -1;
5955 
5956 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5957 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5958 				   true, MAX_COLOR_LUT_ENTRIES);
5959 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5960 
5961 	return 0;
5962 
5963 fail:
5964 	kfree(acrtc);
5965 	kfree(cursor_plane);
5966 	return res;
5967 }
5968 
5969 
5970 static int to_drm_connector_type(enum signal_type st)
5971 {
5972 	switch (st) {
5973 	case SIGNAL_TYPE_HDMI_TYPE_A:
5974 		return DRM_MODE_CONNECTOR_HDMIA;
5975 	case SIGNAL_TYPE_EDP:
5976 		return DRM_MODE_CONNECTOR_eDP;
5977 	case SIGNAL_TYPE_LVDS:
5978 		return DRM_MODE_CONNECTOR_LVDS;
5979 	case SIGNAL_TYPE_RGB:
5980 		return DRM_MODE_CONNECTOR_VGA;
5981 	case SIGNAL_TYPE_DISPLAY_PORT:
5982 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5983 		return DRM_MODE_CONNECTOR_DisplayPort;
5984 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5985 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5986 		return DRM_MODE_CONNECTOR_DVID;
5987 	case SIGNAL_TYPE_VIRTUAL:
5988 		return DRM_MODE_CONNECTOR_VIRTUAL;
5989 
5990 	default:
5991 		return DRM_MODE_CONNECTOR_Unknown;
5992 	}
5993 }
5994 
5995 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5996 {
5997 	struct drm_encoder *encoder;
5998 
5999 	/* There is only one encoder per connector */
6000 	drm_connector_for_each_possible_encoder(connector, encoder)
6001 		return encoder;
6002 
6003 	return NULL;
6004 }
6005 
6006 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6007 {
6008 	struct drm_encoder *encoder;
6009 	struct amdgpu_encoder *amdgpu_encoder;
6010 
6011 	encoder = amdgpu_dm_connector_to_encoder(connector);
6012 
6013 	if (encoder == NULL)
6014 		return;
6015 
6016 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6017 
6018 	amdgpu_encoder->native_mode.clock = 0;
6019 
6020 	if (!list_empty(&connector->probed_modes)) {
6021 		struct drm_display_mode *preferred_mode = NULL;
6022 
6023 		list_for_each_entry(preferred_mode,
6024 				    &connector->probed_modes,
6025 				    head) {
6026 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6027 				amdgpu_encoder->native_mode = *preferred_mode;
6028 
6029 			break;
6030 		}
6031 
6032 	}
6033 }
6034 
6035 static struct drm_display_mode *
6036 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6037 			     char *name,
6038 			     int hdisplay, int vdisplay)
6039 {
6040 	struct drm_device *dev = encoder->dev;
6041 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6042 	struct drm_display_mode *mode = NULL;
6043 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6044 
6045 	mode = drm_mode_duplicate(dev, native_mode);
6046 
6047 	if (mode == NULL)
6048 		return NULL;
6049 
6050 	mode->hdisplay = hdisplay;
6051 	mode->vdisplay = vdisplay;
6052 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6053 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6054 
6055 	return mode;
6056 
6057 }
6058 
6059 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6060 						 struct drm_connector *connector)
6061 {
6062 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6063 	struct drm_display_mode *mode = NULL;
6064 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6065 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6066 				to_amdgpu_dm_connector(connector);
6067 	int i;
6068 	int n;
6069 	struct mode_size {
6070 		char name[DRM_DISPLAY_MODE_LEN];
6071 		int w;
6072 		int h;
6073 	} common_modes[] = {
6074 		{  "640x480",  640,  480},
6075 		{  "800x600",  800,  600},
6076 		{ "1024x768", 1024,  768},
6077 		{ "1280x720", 1280,  720},
6078 		{ "1280x800", 1280,  800},
6079 		{"1280x1024", 1280, 1024},
6080 		{ "1440x900", 1440,  900},
6081 		{"1680x1050", 1680, 1050},
6082 		{"1600x1200", 1600, 1200},
6083 		{"1920x1080", 1920, 1080},
6084 		{"1920x1200", 1920, 1200}
6085 	};
6086 
6087 	n = ARRAY_SIZE(common_modes);
6088 
6089 	for (i = 0; i < n; i++) {
6090 		struct drm_display_mode *curmode = NULL;
6091 		bool mode_existed = false;
6092 
6093 		if (common_modes[i].w > native_mode->hdisplay ||
6094 		    common_modes[i].h > native_mode->vdisplay ||
6095 		   (common_modes[i].w == native_mode->hdisplay &&
6096 		    common_modes[i].h == native_mode->vdisplay))
6097 			continue;
6098 
6099 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6100 			if (common_modes[i].w == curmode->hdisplay &&
6101 			    common_modes[i].h == curmode->vdisplay) {
6102 				mode_existed = true;
6103 				break;
6104 			}
6105 		}
6106 
6107 		if (mode_existed)
6108 			continue;
6109 
6110 		mode = amdgpu_dm_create_common_mode(encoder,
6111 				common_modes[i].name, common_modes[i].w,
6112 				common_modes[i].h);
6113 		drm_mode_probed_add(connector, mode);
6114 		amdgpu_dm_connector->num_modes++;
6115 	}
6116 }
6117 
6118 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6119 					      struct edid *edid)
6120 {
6121 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6122 			to_amdgpu_dm_connector(connector);
6123 
6124 	if (edid) {
6125 		/* empty probed_modes */
6126 		INIT_LIST_HEAD(&connector->probed_modes);
6127 		amdgpu_dm_connector->num_modes =
6128 				drm_add_edid_modes(connector, edid);
6129 
6130 		/* sorting the probed modes before calling function
6131 		 * amdgpu_dm_get_native_mode() since EDID can have
6132 		 * more than one preferred mode. The modes that are
6133 		 * later in the probed mode list could be of higher
6134 		 * and preferred resolution. For example, 3840x2160
6135 		 * resolution in base EDID preferred timing and 4096x2160
6136 		 * preferred resolution in DID extension block later.
6137 		 */
6138 		drm_mode_sort(&connector->probed_modes);
6139 		amdgpu_dm_get_native_mode(connector);
6140 	} else {
6141 		amdgpu_dm_connector->num_modes = 0;
6142 	}
6143 }
6144 
6145 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6146 {
6147 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6148 			to_amdgpu_dm_connector(connector);
6149 	struct drm_encoder *encoder;
6150 	struct edid *edid = amdgpu_dm_connector->edid;
6151 
6152 	encoder = amdgpu_dm_connector_to_encoder(connector);
6153 
6154 	if (!edid || !drm_edid_is_valid(edid)) {
6155 		amdgpu_dm_connector->num_modes =
6156 				drm_add_modes_noedid(connector, 640, 480);
6157 	} else {
6158 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6159 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6160 	}
6161 	amdgpu_dm_fbc_init(connector);
6162 
6163 	return amdgpu_dm_connector->num_modes;
6164 }
6165 
6166 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6167 				     struct amdgpu_dm_connector *aconnector,
6168 				     int connector_type,
6169 				     struct dc_link *link,
6170 				     int link_index)
6171 {
6172 	struct amdgpu_device *adev = dm->ddev->dev_private;
6173 
6174 	/*
6175 	 * Some of the properties below require access to state, like bpc.
6176 	 * Allocate some default initial connector state with our reset helper.
6177 	 */
6178 	if (aconnector->base.funcs->reset)
6179 		aconnector->base.funcs->reset(&aconnector->base);
6180 
6181 	aconnector->connector_id = link_index;
6182 	aconnector->dc_link = link;
6183 	aconnector->base.interlace_allowed = false;
6184 	aconnector->base.doublescan_allowed = false;
6185 	aconnector->base.stereo_allowed = false;
6186 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6187 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6188 	aconnector->audio_inst = -1;
6189 	mutex_init(&aconnector->hpd_lock);
6190 
6191 	/*
6192 	 * configure support HPD hot plug connector_>polled default value is 0
6193 	 * which means HPD hot plug not supported
6194 	 */
6195 	switch (connector_type) {
6196 	case DRM_MODE_CONNECTOR_HDMIA:
6197 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6198 		aconnector->base.ycbcr_420_allowed =
6199 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6200 		break;
6201 	case DRM_MODE_CONNECTOR_DisplayPort:
6202 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6203 		aconnector->base.ycbcr_420_allowed =
6204 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6205 		break;
6206 	case DRM_MODE_CONNECTOR_DVID:
6207 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6208 		break;
6209 	default:
6210 		break;
6211 	}
6212 
6213 	drm_object_attach_property(&aconnector->base.base,
6214 				dm->ddev->mode_config.scaling_mode_property,
6215 				DRM_MODE_SCALE_NONE);
6216 
6217 	drm_object_attach_property(&aconnector->base.base,
6218 				adev->mode_info.underscan_property,
6219 				UNDERSCAN_OFF);
6220 	drm_object_attach_property(&aconnector->base.base,
6221 				adev->mode_info.underscan_hborder_property,
6222 				0);
6223 	drm_object_attach_property(&aconnector->base.base,
6224 				adev->mode_info.underscan_vborder_property,
6225 				0);
6226 
6227 	if (!aconnector->mst_port)
6228 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6229 
6230 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6231 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6232 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6233 
6234 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6235 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6236 		drm_object_attach_property(&aconnector->base.base,
6237 				adev->mode_info.abm_level_property, 0);
6238 	}
6239 
6240 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6241 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6242 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6243 		drm_object_attach_property(
6244 			&aconnector->base.base,
6245 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6246 
6247 		if (!aconnector->mst_port)
6248 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6249 
6250 #ifdef CONFIG_DRM_AMD_DC_HDCP
6251 		if (adev->dm.hdcp_workqueue)
6252 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6253 #endif
6254 	}
6255 }
6256 
6257 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6258 			      struct i2c_msg *msgs, int num)
6259 {
6260 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6261 	struct ddc_service *ddc_service = i2c->ddc_service;
6262 	struct i2c_command cmd;
6263 	int i;
6264 	int result = -EIO;
6265 
6266 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6267 
6268 	if (!cmd.payloads)
6269 		return result;
6270 
6271 	cmd.number_of_payloads = num;
6272 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6273 	cmd.speed = 100;
6274 
6275 	for (i = 0; i < num; i++) {
6276 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6277 		cmd.payloads[i].address = msgs[i].addr;
6278 		cmd.payloads[i].length = msgs[i].len;
6279 		cmd.payloads[i].data = msgs[i].buf;
6280 	}
6281 
6282 	if (dc_submit_i2c(
6283 			ddc_service->ctx->dc,
6284 			ddc_service->ddc_pin->hw_info.ddc_channel,
6285 			&cmd))
6286 		result = num;
6287 
6288 	kfree(cmd.payloads);
6289 	return result;
6290 }
6291 
6292 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6293 {
6294 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6295 }
6296 
6297 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6298 	.master_xfer = amdgpu_dm_i2c_xfer,
6299 	.functionality = amdgpu_dm_i2c_func,
6300 };
6301 
6302 static struct amdgpu_i2c_adapter *
6303 create_i2c(struct ddc_service *ddc_service,
6304 	   int link_index,
6305 	   int *res)
6306 {
6307 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6308 	struct amdgpu_i2c_adapter *i2c;
6309 
6310 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6311 	if (!i2c)
6312 		return NULL;
6313 	i2c->base.owner = THIS_MODULE;
6314 	i2c->base.class = I2C_CLASS_DDC;
6315 	i2c->base.dev.parent = &adev->pdev->dev;
6316 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6317 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6318 	i2c_set_adapdata(&i2c->base, i2c);
6319 	i2c->ddc_service = ddc_service;
6320 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6321 
6322 	return i2c;
6323 }
6324 
6325 
6326 /*
6327  * Note: this function assumes that dc_link_detect() was called for the
6328  * dc_link which will be represented by this aconnector.
6329  */
6330 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6331 				    struct amdgpu_dm_connector *aconnector,
6332 				    uint32_t link_index,
6333 				    struct amdgpu_encoder *aencoder)
6334 {
6335 	int res = 0;
6336 	int connector_type;
6337 	struct dc *dc = dm->dc;
6338 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6339 	struct amdgpu_i2c_adapter *i2c;
6340 
6341 	link->priv = aconnector;
6342 
6343 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6344 
6345 	i2c = create_i2c(link->ddc, link->link_index, &res);
6346 	if (!i2c) {
6347 		DRM_ERROR("Failed to create i2c adapter data\n");
6348 		return -ENOMEM;
6349 	}
6350 
6351 	aconnector->i2c = i2c;
6352 	res = i2c_add_adapter(&i2c->base);
6353 
6354 	if (res) {
6355 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6356 		goto out_free;
6357 	}
6358 
6359 	connector_type = to_drm_connector_type(link->connector_signal);
6360 
6361 	res = drm_connector_init_with_ddc(
6362 			dm->ddev,
6363 			&aconnector->base,
6364 			&amdgpu_dm_connector_funcs,
6365 			connector_type,
6366 			&i2c->base);
6367 
6368 	if (res) {
6369 		DRM_ERROR("connector_init failed\n");
6370 		aconnector->connector_id = -1;
6371 		goto out_free;
6372 	}
6373 
6374 	drm_connector_helper_add(
6375 			&aconnector->base,
6376 			&amdgpu_dm_connector_helper_funcs);
6377 
6378 	amdgpu_dm_connector_init_helper(
6379 		dm,
6380 		aconnector,
6381 		connector_type,
6382 		link,
6383 		link_index);
6384 
6385 	drm_connector_attach_encoder(
6386 		&aconnector->base, &aencoder->base);
6387 
6388 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6389 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6390 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6391 
6392 out_free:
6393 	if (res) {
6394 		kfree(i2c);
6395 		aconnector->i2c = NULL;
6396 	}
6397 	return res;
6398 }
6399 
6400 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6401 {
6402 	switch (adev->mode_info.num_crtc) {
6403 	case 1:
6404 		return 0x1;
6405 	case 2:
6406 		return 0x3;
6407 	case 3:
6408 		return 0x7;
6409 	case 4:
6410 		return 0xf;
6411 	case 5:
6412 		return 0x1f;
6413 	case 6:
6414 	default:
6415 		return 0x3f;
6416 	}
6417 }
6418 
6419 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6420 				  struct amdgpu_encoder *aencoder,
6421 				  uint32_t link_index)
6422 {
6423 	struct amdgpu_device *adev = dev->dev_private;
6424 
6425 	int res = drm_encoder_init(dev,
6426 				   &aencoder->base,
6427 				   &amdgpu_dm_encoder_funcs,
6428 				   DRM_MODE_ENCODER_TMDS,
6429 				   NULL);
6430 
6431 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6432 
6433 	if (!res)
6434 		aencoder->encoder_id = link_index;
6435 	else
6436 		aencoder->encoder_id = -1;
6437 
6438 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6439 
6440 	return res;
6441 }
6442 
6443 static void manage_dm_interrupts(struct amdgpu_device *adev,
6444 				 struct amdgpu_crtc *acrtc,
6445 				 bool enable)
6446 {
6447 	/*
6448 	 * We have no guarantee that the frontend index maps to the same
6449 	 * backend index - some even map to more than one.
6450 	 *
6451 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6452 	 */
6453 	int irq_type =
6454 		amdgpu_display_crtc_idx_to_irq_type(
6455 			adev,
6456 			acrtc->crtc_id);
6457 
6458 	if (enable) {
6459 		drm_crtc_vblank_on(&acrtc->base);
6460 		amdgpu_irq_get(
6461 			adev,
6462 			&adev->pageflip_irq,
6463 			irq_type);
6464 	} else {
6465 
6466 		amdgpu_irq_put(
6467 			adev,
6468 			&adev->pageflip_irq,
6469 			irq_type);
6470 		drm_crtc_vblank_off(&acrtc->base);
6471 	}
6472 }
6473 
6474 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6475 				      struct amdgpu_crtc *acrtc)
6476 {
6477 	int irq_type =
6478 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6479 
6480 	/**
6481 	 * This reads the current state for the IRQ and force reapplies
6482 	 * the setting to hardware.
6483 	 */
6484 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6485 }
6486 
6487 static bool
6488 is_scaling_state_different(const struct dm_connector_state *dm_state,
6489 			   const struct dm_connector_state *old_dm_state)
6490 {
6491 	if (dm_state->scaling != old_dm_state->scaling)
6492 		return true;
6493 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6494 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6495 			return true;
6496 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6497 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6498 			return true;
6499 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6500 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6501 		return true;
6502 	return false;
6503 }
6504 
6505 #ifdef CONFIG_DRM_AMD_DC_HDCP
6506 static bool is_content_protection_different(struct drm_connector_state *state,
6507 					    const struct drm_connector_state *old_state,
6508 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6509 {
6510 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6511 
6512 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6513 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6514 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6515 		return true;
6516 	}
6517 
6518 	/* CP is being re enabled, ignore this */
6519 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6520 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6521 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6522 		return false;
6523 	}
6524 
6525 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6526 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6527 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6528 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6529 
6530 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6531 	 * hot-plug, headless s3, dpms
6532 	 */
6533 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6534 	    aconnector->dc_sink != NULL)
6535 		return true;
6536 
6537 	if (old_state->content_protection == state->content_protection)
6538 		return false;
6539 
6540 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6541 		return true;
6542 
6543 	return false;
6544 }
6545 
6546 #endif
6547 static void remove_stream(struct amdgpu_device *adev,
6548 			  struct amdgpu_crtc *acrtc,
6549 			  struct dc_stream_state *stream)
6550 {
6551 	/* this is the update mode case */
6552 
6553 	acrtc->otg_inst = -1;
6554 	acrtc->enabled = false;
6555 }
6556 
6557 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6558 			       struct dc_cursor_position *position)
6559 {
6560 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6561 	int x, y;
6562 	int xorigin = 0, yorigin = 0;
6563 
6564 	position->enable = false;
6565 	position->x = 0;
6566 	position->y = 0;
6567 
6568 	if (!crtc || !plane->state->fb)
6569 		return 0;
6570 
6571 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6572 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6573 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6574 			  __func__,
6575 			  plane->state->crtc_w,
6576 			  plane->state->crtc_h);
6577 		return -EINVAL;
6578 	}
6579 
6580 	x = plane->state->crtc_x;
6581 	y = plane->state->crtc_y;
6582 
6583 	if (x <= -amdgpu_crtc->max_cursor_width ||
6584 	    y <= -amdgpu_crtc->max_cursor_height)
6585 		return 0;
6586 
6587 	if (x < 0) {
6588 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6589 		x = 0;
6590 	}
6591 	if (y < 0) {
6592 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6593 		y = 0;
6594 	}
6595 	position->enable = true;
6596 	position->translate_by_source = true;
6597 	position->x = x;
6598 	position->y = y;
6599 	position->x_hotspot = xorigin;
6600 	position->y_hotspot = yorigin;
6601 
6602 	return 0;
6603 }
6604 
6605 static void handle_cursor_update(struct drm_plane *plane,
6606 				 struct drm_plane_state *old_plane_state)
6607 {
6608 	struct amdgpu_device *adev = plane->dev->dev_private;
6609 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6610 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6611 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6612 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6613 	uint64_t address = afb ? afb->address : 0;
6614 	struct dc_cursor_position position;
6615 	struct dc_cursor_attributes attributes;
6616 	int ret;
6617 
6618 	if (!plane->state->fb && !old_plane_state->fb)
6619 		return;
6620 
6621 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6622 			 __func__,
6623 			 amdgpu_crtc->crtc_id,
6624 			 plane->state->crtc_w,
6625 			 plane->state->crtc_h);
6626 
6627 	ret = get_cursor_position(plane, crtc, &position);
6628 	if (ret)
6629 		return;
6630 
6631 	if (!position.enable) {
6632 		/* turn off cursor */
6633 		if (crtc_state && crtc_state->stream) {
6634 			mutex_lock(&adev->dm.dc_lock);
6635 			dc_stream_set_cursor_position(crtc_state->stream,
6636 						      &position);
6637 			mutex_unlock(&adev->dm.dc_lock);
6638 		}
6639 		return;
6640 	}
6641 
6642 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6643 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6644 
6645 	memset(&attributes, 0, sizeof(attributes));
6646 	attributes.address.high_part = upper_32_bits(address);
6647 	attributes.address.low_part  = lower_32_bits(address);
6648 	attributes.width             = plane->state->crtc_w;
6649 	attributes.height            = plane->state->crtc_h;
6650 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6651 	attributes.rotation_angle    = 0;
6652 	attributes.attribute_flags.value = 0;
6653 
6654 	attributes.pitch = attributes.width;
6655 
6656 	if (crtc_state->stream) {
6657 		mutex_lock(&adev->dm.dc_lock);
6658 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6659 							 &attributes))
6660 			DRM_ERROR("DC failed to set cursor attributes\n");
6661 
6662 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6663 						   &position))
6664 			DRM_ERROR("DC failed to set cursor position\n");
6665 		mutex_unlock(&adev->dm.dc_lock);
6666 	}
6667 }
6668 
6669 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6670 {
6671 
6672 	assert_spin_locked(&acrtc->base.dev->event_lock);
6673 	WARN_ON(acrtc->event);
6674 
6675 	acrtc->event = acrtc->base.state->event;
6676 
6677 	/* Set the flip status */
6678 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6679 
6680 	/* Mark this event as consumed */
6681 	acrtc->base.state->event = NULL;
6682 
6683 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6684 						 acrtc->crtc_id);
6685 }
6686 
6687 static void update_freesync_state_on_stream(
6688 	struct amdgpu_display_manager *dm,
6689 	struct dm_crtc_state *new_crtc_state,
6690 	struct dc_stream_state *new_stream,
6691 	struct dc_plane_state *surface,
6692 	u32 flip_timestamp_in_us)
6693 {
6694 	struct mod_vrr_params vrr_params;
6695 	struct dc_info_packet vrr_infopacket = {0};
6696 	struct amdgpu_device *adev = dm->adev;
6697 	unsigned long flags;
6698 
6699 	if (!new_stream)
6700 		return;
6701 
6702 	/*
6703 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6704 	 * For now it's sufficient to just guard against these conditions.
6705 	 */
6706 
6707 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6708 		return;
6709 
6710 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6711 	vrr_params = new_crtc_state->vrr_params;
6712 
6713 	if (surface) {
6714 		mod_freesync_handle_preflip(
6715 			dm->freesync_module,
6716 			surface,
6717 			new_stream,
6718 			flip_timestamp_in_us,
6719 			&vrr_params);
6720 
6721 		if (adev->family < AMDGPU_FAMILY_AI &&
6722 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6723 			mod_freesync_handle_v_update(dm->freesync_module,
6724 						     new_stream, &vrr_params);
6725 
6726 			/* Need to call this before the frame ends. */
6727 			dc_stream_adjust_vmin_vmax(dm->dc,
6728 						   new_crtc_state->stream,
6729 						   &vrr_params.adjust);
6730 		}
6731 	}
6732 
6733 	mod_freesync_build_vrr_infopacket(
6734 		dm->freesync_module,
6735 		new_stream,
6736 		&vrr_params,
6737 		PACKET_TYPE_VRR,
6738 		TRANSFER_FUNC_UNKNOWN,
6739 		&vrr_infopacket);
6740 
6741 	new_crtc_state->freesync_timing_changed |=
6742 		(memcmp(&new_crtc_state->vrr_params.adjust,
6743 			&vrr_params.adjust,
6744 			sizeof(vrr_params.adjust)) != 0);
6745 
6746 	new_crtc_state->freesync_vrr_info_changed |=
6747 		(memcmp(&new_crtc_state->vrr_infopacket,
6748 			&vrr_infopacket,
6749 			sizeof(vrr_infopacket)) != 0);
6750 
6751 	new_crtc_state->vrr_params = vrr_params;
6752 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6753 
6754 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6755 	new_stream->vrr_infopacket = vrr_infopacket;
6756 
6757 	if (new_crtc_state->freesync_vrr_info_changed)
6758 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6759 			      new_crtc_state->base.crtc->base.id,
6760 			      (int)new_crtc_state->base.vrr_enabled,
6761 			      (int)vrr_params.state);
6762 
6763 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6764 }
6765 
6766 static void pre_update_freesync_state_on_stream(
6767 	struct amdgpu_display_manager *dm,
6768 	struct dm_crtc_state *new_crtc_state)
6769 {
6770 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6771 	struct mod_vrr_params vrr_params;
6772 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6773 	struct amdgpu_device *adev = dm->adev;
6774 	unsigned long flags;
6775 
6776 	if (!new_stream)
6777 		return;
6778 
6779 	/*
6780 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6781 	 * For now it's sufficient to just guard against these conditions.
6782 	 */
6783 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6784 		return;
6785 
6786 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6787 	vrr_params = new_crtc_state->vrr_params;
6788 
6789 	if (new_crtc_state->vrr_supported &&
6790 	    config.min_refresh_in_uhz &&
6791 	    config.max_refresh_in_uhz) {
6792 		config.state = new_crtc_state->base.vrr_enabled ?
6793 			VRR_STATE_ACTIVE_VARIABLE :
6794 			VRR_STATE_INACTIVE;
6795 	} else {
6796 		config.state = VRR_STATE_UNSUPPORTED;
6797 	}
6798 
6799 	mod_freesync_build_vrr_params(dm->freesync_module,
6800 				      new_stream,
6801 				      &config, &vrr_params);
6802 
6803 	new_crtc_state->freesync_timing_changed |=
6804 		(memcmp(&new_crtc_state->vrr_params.adjust,
6805 			&vrr_params.adjust,
6806 			sizeof(vrr_params.adjust)) != 0);
6807 
6808 	new_crtc_state->vrr_params = vrr_params;
6809 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6810 }
6811 
6812 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6813 					    struct dm_crtc_state *new_state)
6814 {
6815 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6816 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6817 
6818 	if (!old_vrr_active && new_vrr_active) {
6819 		/* Transition VRR inactive -> active:
6820 		 * While VRR is active, we must not disable vblank irq, as a
6821 		 * reenable after disable would compute bogus vblank/pflip
6822 		 * timestamps if it likely happened inside display front-porch.
6823 		 *
6824 		 * We also need vupdate irq for the actual core vblank handling
6825 		 * at end of vblank.
6826 		 */
6827 		dm_set_vupdate_irq(new_state->base.crtc, true);
6828 		drm_crtc_vblank_get(new_state->base.crtc);
6829 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6830 				 __func__, new_state->base.crtc->base.id);
6831 	} else if (old_vrr_active && !new_vrr_active) {
6832 		/* Transition VRR active -> inactive:
6833 		 * Allow vblank irq disable again for fixed refresh rate.
6834 		 */
6835 		dm_set_vupdate_irq(new_state->base.crtc, false);
6836 		drm_crtc_vblank_put(new_state->base.crtc);
6837 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6838 				 __func__, new_state->base.crtc->base.id);
6839 	}
6840 }
6841 
6842 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6843 {
6844 	struct drm_plane *plane;
6845 	struct drm_plane_state *old_plane_state, *new_plane_state;
6846 	int i;
6847 
6848 	/*
6849 	 * TODO: Make this per-stream so we don't issue redundant updates for
6850 	 * commits with multiple streams.
6851 	 */
6852 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6853 				       new_plane_state, i)
6854 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6855 			handle_cursor_update(plane, old_plane_state);
6856 }
6857 
6858 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6859 				    struct dc_state *dc_state,
6860 				    struct drm_device *dev,
6861 				    struct amdgpu_display_manager *dm,
6862 				    struct drm_crtc *pcrtc,
6863 				    bool wait_for_vblank)
6864 {
6865 	uint32_t i;
6866 	uint64_t timestamp_ns;
6867 	struct drm_plane *plane;
6868 	struct drm_plane_state *old_plane_state, *new_plane_state;
6869 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6870 	struct drm_crtc_state *new_pcrtc_state =
6871 			drm_atomic_get_new_crtc_state(state, pcrtc);
6872 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6873 	struct dm_crtc_state *dm_old_crtc_state =
6874 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6875 	int planes_count = 0, vpos, hpos;
6876 	long r;
6877 	unsigned long flags;
6878 	struct amdgpu_bo *abo;
6879 	uint64_t tiling_flags;
6880 	bool tmz_surface = false;
6881 	uint32_t target_vblank, last_flip_vblank;
6882 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6883 	bool pflip_present = false;
6884 	struct {
6885 		struct dc_surface_update surface_updates[MAX_SURFACES];
6886 		struct dc_plane_info plane_infos[MAX_SURFACES];
6887 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6888 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6889 		struct dc_stream_update stream_update;
6890 	} *bundle;
6891 
6892 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6893 
6894 	if (!bundle) {
6895 		dm_error("Failed to allocate update bundle\n");
6896 		goto cleanup;
6897 	}
6898 
6899 	/*
6900 	 * Disable the cursor first if we're disabling all the planes.
6901 	 * It'll remain on the screen after the planes are re-enabled
6902 	 * if we don't.
6903 	 */
6904 	if (acrtc_state->active_planes == 0)
6905 		amdgpu_dm_commit_cursors(state);
6906 
6907 	/* update planes when needed */
6908 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6909 		struct drm_crtc *crtc = new_plane_state->crtc;
6910 		struct drm_crtc_state *new_crtc_state;
6911 		struct drm_framebuffer *fb = new_plane_state->fb;
6912 		bool plane_needs_flip;
6913 		struct dc_plane_state *dc_plane;
6914 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6915 
6916 		/* Cursor plane is handled after stream updates */
6917 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6918 			continue;
6919 
6920 		if (!fb || !crtc || pcrtc != crtc)
6921 			continue;
6922 
6923 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6924 		if (!new_crtc_state->active)
6925 			continue;
6926 
6927 		dc_plane = dm_new_plane_state->dc_state;
6928 
6929 		bundle->surface_updates[planes_count].surface = dc_plane;
6930 		if (new_pcrtc_state->color_mgmt_changed) {
6931 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6932 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6933 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6934 		}
6935 
6936 		fill_dc_scaling_info(new_plane_state,
6937 				     &bundle->scaling_infos[planes_count]);
6938 
6939 		bundle->surface_updates[planes_count].scaling_info =
6940 			&bundle->scaling_infos[planes_count];
6941 
6942 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6943 
6944 		pflip_present = pflip_present || plane_needs_flip;
6945 
6946 		if (!plane_needs_flip) {
6947 			planes_count += 1;
6948 			continue;
6949 		}
6950 
6951 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6952 
6953 		/*
6954 		 * Wait for all fences on this FB. Do limited wait to avoid
6955 		 * deadlock during GPU reset when this fence will not signal
6956 		 * but we hold reservation lock for the BO.
6957 		 */
6958 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6959 							false,
6960 							msecs_to_jiffies(5000));
6961 		if (unlikely(r <= 0))
6962 			DRM_ERROR("Waiting for fences timed out!");
6963 
6964 		/*
6965 		 * TODO This might fail and hence better not used, wait
6966 		 * explicitly on fences instead
6967 		 * and in general should be called for
6968 		 * blocking commit to as per framework helpers
6969 		 */
6970 		r = amdgpu_bo_reserve(abo, true);
6971 		if (unlikely(r != 0))
6972 			DRM_ERROR("failed to reserve buffer before flip\n");
6973 
6974 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6975 
6976 		tmz_surface = amdgpu_bo_encrypted(abo);
6977 
6978 		amdgpu_bo_unreserve(abo);
6979 
6980 		fill_dc_plane_info_and_addr(
6981 			dm->adev, new_plane_state, tiling_flags,
6982 			&bundle->plane_infos[planes_count],
6983 			&bundle->flip_addrs[planes_count].address,
6984 			tmz_surface,
6985 			false);
6986 
6987 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6988 				 new_plane_state->plane->index,
6989 				 bundle->plane_infos[planes_count].dcc.enable);
6990 
6991 		bundle->surface_updates[planes_count].plane_info =
6992 			&bundle->plane_infos[planes_count];
6993 
6994 		/*
6995 		 * Only allow immediate flips for fast updates that don't
6996 		 * change FB pitch, DCC state, rotation or mirroing.
6997 		 */
6998 		bundle->flip_addrs[planes_count].flip_immediate =
6999 			crtc->state->async_flip &&
7000 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7001 
7002 		timestamp_ns = ktime_get_ns();
7003 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7004 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7005 		bundle->surface_updates[planes_count].surface = dc_plane;
7006 
7007 		if (!bundle->surface_updates[planes_count].surface) {
7008 			DRM_ERROR("No surface for CRTC: id=%d\n",
7009 					acrtc_attach->crtc_id);
7010 			continue;
7011 		}
7012 
7013 		if (plane == pcrtc->primary)
7014 			update_freesync_state_on_stream(
7015 				dm,
7016 				acrtc_state,
7017 				acrtc_state->stream,
7018 				dc_plane,
7019 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7020 
7021 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7022 				 __func__,
7023 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7024 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7025 
7026 		planes_count += 1;
7027 
7028 	}
7029 
7030 	if (pflip_present) {
7031 		if (!vrr_active) {
7032 			/* Use old throttling in non-vrr fixed refresh rate mode
7033 			 * to keep flip scheduling based on target vblank counts
7034 			 * working in a backwards compatible way, e.g., for
7035 			 * clients using the GLX_OML_sync_control extension or
7036 			 * DRI3/Present extension with defined target_msc.
7037 			 */
7038 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7039 		}
7040 		else {
7041 			/* For variable refresh rate mode only:
7042 			 * Get vblank of last completed flip to avoid > 1 vrr
7043 			 * flips per video frame by use of throttling, but allow
7044 			 * flip programming anywhere in the possibly large
7045 			 * variable vrr vblank interval for fine-grained flip
7046 			 * timing control and more opportunity to avoid stutter
7047 			 * on late submission of flips.
7048 			 */
7049 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7050 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7051 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7052 		}
7053 
7054 		target_vblank = last_flip_vblank + wait_for_vblank;
7055 
7056 		/*
7057 		 * Wait until we're out of the vertical blank period before the one
7058 		 * targeted by the flip
7059 		 */
7060 		while ((acrtc_attach->enabled &&
7061 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7062 							    0, &vpos, &hpos, NULL,
7063 							    NULL, &pcrtc->hwmode)
7064 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7065 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7066 			(int)(target_vblank -
7067 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7068 			usleep_range(1000, 1100);
7069 		}
7070 
7071 		/**
7072 		 * Prepare the flip event for the pageflip interrupt to handle.
7073 		 *
7074 		 * This only works in the case where we've already turned on the
7075 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7076 		 * from 0 -> n planes we have to skip a hardware generated event
7077 		 * and rely on sending it from software.
7078 		 */
7079 		if (acrtc_attach->base.state->event &&
7080 		    acrtc_state->active_planes > 0) {
7081 			drm_crtc_vblank_get(pcrtc);
7082 
7083 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7084 
7085 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7086 			prepare_flip_isr(acrtc_attach);
7087 
7088 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7089 		}
7090 
7091 		if (acrtc_state->stream) {
7092 			if (acrtc_state->freesync_vrr_info_changed)
7093 				bundle->stream_update.vrr_infopacket =
7094 					&acrtc_state->stream->vrr_infopacket;
7095 		}
7096 	}
7097 
7098 	/* Update the planes if changed or disable if we don't have any. */
7099 	if ((planes_count || acrtc_state->active_planes == 0) &&
7100 		acrtc_state->stream) {
7101 		bundle->stream_update.stream = acrtc_state->stream;
7102 		if (new_pcrtc_state->mode_changed) {
7103 			bundle->stream_update.src = acrtc_state->stream->src;
7104 			bundle->stream_update.dst = acrtc_state->stream->dst;
7105 		}
7106 
7107 		if (new_pcrtc_state->color_mgmt_changed) {
7108 			/*
7109 			 * TODO: This isn't fully correct since we've actually
7110 			 * already modified the stream in place.
7111 			 */
7112 			bundle->stream_update.gamut_remap =
7113 				&acrtc_state->stream->gamut_remap_matrix;
7114 			bundle->stream_update.output_csc_transform =
7115 				&acrtc_state->stream->csc_color_matrix;
7116 			bundle->stream_update.out_transfer_func =
7117 				acrtc_state->stream->out_transfer_func;
7118 		}
7119 
7120 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7121 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7122 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7123 
7124 		/*
7125 		 * If FreeSync state on the stream has changed then we need to
7126 		 * re-adjust the min/max bounds now that DC doesn't handle this
7127 		 * as part of commit.
7128 		 */
7129 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7130 		    amdgpu_dm_vrr_active(acrtc_state)) {
7131 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7132 			dc_stream_adjust_vmin_vmax(
7133 				dm->dc, acrtc_state->stream,
7134 				&acrtc_state->vrr_params.adjust);
7135 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7136 		}
7137 		mutex_lock(&dm->dc_lock);
7138 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7139 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7140 			amdgpu_dm_psr_disable(acrtc_state->stream);
7141 
7142 		dc_commit_updates_for_stream(dm->dc,
7143 						     bundle->surface_updates,
7144 						     planes_count,
7145 						     acrtc_state->stream,
7146 						     &bundle->stream_update,
7147 						     dc_state);
7148 
7149 		/**
7150 		 * Enable or disable the interrupts on the backend.
7151 		 *
7152 		 * Most pipes are put into power gating when unused.
7153 		 *
7154 		 * When power gating is enabled on a pipe we lose the
7155 		 * interrupt enablement state when power gating is disabled.
7156 		 *
7157 		 * So we need to update the IRQ control state in hardware
7158 		 * whenever the pipe turns on (since it could be previously
7159 		 * power gated) or off (since some pipes can't be power gated
7160 		 * on some ASICs).
7161 		 */
7162 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7163 			dm_update_pflip_irq_state(
7164 				(struct amdgpu_device *)dev->dev_private,
7165 				acrtc_attach);
7166 
7167 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7168 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7169 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7170 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7171 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7172 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7173 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7174 			amdgpu_dm_psr_enable(acrtc_state->stream);
7175 		}
7176 
7177 		mutex_unlock(&dm->dc_lock);
7178 	}
7179 
7180 	/*
7181 	 * Update cursor state *after* programming all the planes.
7182 	 * This avoids redundant programming in the case where we're going
7183 	 * to be disabling a single plane - those pipes are being disabled.
7184 	 */
7185 	if (acrtc_state->active_planes)
7186 		amdgpu_dm_commit_cursors(state);
7187 
7188 cleanup:
7189 	kfree(bundle);
7190 }
7191 
7192 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7193 				   struct drm_atomic_state *state)
7194 {
7195 	struct amdgpu_device *adev = dev->dev_private;
7196 	struct amdgpu_dm_connector *aconnector;
7197 	struct drm_connector *connector;
7198 	struct drm_connector_state *old_con_state, *new_con_state;
7199 	struct drm_crtc_state *new_crtc_state;
7200 	struct dm_crtc_state *new_dm_crtc_state;
7201 	const struct dc_stream_status *status;
7202 	int i, inst;
7203 
7204 	/* Notify device removals. */
7205 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7206 		if (old_con_state->crtc != new_con_state->crtc) {
7207 			/* CRTC changes require notification. */
7208 			goto notify;
7209 		}
7210 
7211 		if (!new_con_state->crtc)
7212 			continue;
7213 
7214 		new_crtc_state = drm_atomic_get_new_crtc_state(
7215 			state, new_con_state->crtc);
7216 
7217 		if (!new_crtc_state)
7218 			continue;
7219 
7220 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7221 			continue;
7222 
7223 	notify:
7224 		aconnector = to_amdgpu_dm_connector(connector);
7225 
7226 		mutex_lock(&adev->dm.audio_lock);
7227 		inst = aconnector->audio_inst;
7228 		aconnector->audio_inst = -1;
7229 		mutex_unlock(&adev->dm.audio_lock);
7230 
7231 		amdgpu_dm_audio_eld_notify(adev, inst);
7232 	}
7233 
7234 	/* Notify audio device additions. */
7235 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7236 		if (!new_con_state->crtc)
7237 			continue;
7238 
7239 		new_crtc_state = drm_atomic_get_new_crtc_state(
7240 			state, new_con_state->crtc);
7241 
7242 		if (!new_crtc_state)
7243 			continue;
7244 
7245 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7246 			continue;
7247 
7248 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7249 		if (!new_dm_crtc_state->stream)
7250 			continue;
7251 
7252 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7253 		if (!status)
7254 			continue;
7255 
7256 		aconnector = to_amdgpu_dm_connector(connector);
7257 
7258 		mutex_lock(&adev->dm.audio_lock);
7259 		inst = status->audio_inst;
7260 		aconnector->audio_inst = inst;
7261 		mutex_unlock(&adev->dm.audio_lock);
7262 
7263 		amdgpu_dm_audio_eld_notify(adev, inst);
7264 	}
7265 }
7266 
7267 /*
7268  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7269  * @crtc_state: the DRM CRTC state
7270  * @stream_state: the DC stream state.
7271  *
7272  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7273  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7274  */
7275 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7276 						struct dc_stream_state *stream_state)
7277 {
7278 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7279 }
7280 
7281 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7282 				   struct drm_atomic_state *state,
7283 				   bool nonblock)
7284 {
7285 	struct drm_crtc *crtc;
7286 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7287 	struct amdgpu_device *adev = dev->dev_private;
7288 	int i;
7289 
7290 	/*
7291 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7292 	 * a modeset, being disabled, or have no active planes.
7293 	 *
7294 	 * It's done in atomic commit rather than commit tail for now since
7295 	 * some of these interrupt handlers access the current CRTC state and
7296 	 * potentially the stream pointer itself.
7297 	 *
7298 	 * Since the atomic state is swapped within atomic commit and not within
7299 	 * commit tail this would leave to new state (that hasn't been committed yet)
7300 	 * being accesssed from within the handlers.
7301 	 *
7302 	 * TODO: Fix this so we can do this in commit tail and not have to block
7303 	 * in atomic check.
7304 	 */
7305 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7306 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7307 
7308 		if (old_crtc_state->active &&
7309 		    (!new_crtc_state->active ||
7310 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7311 			manage_dm_interrupts(adev, acrtc, false);
7312 	}
7313 	/*
7314 	 * Add check here for SoC's that support hardware cursor plane, to
7315 	 * unset legacy_cursor_update
7316 	 */
7317 
7318 	return drm_atomic_helper_commit(dev, state, nonblock);
7319 
7320 	/*TODO Handle EINTR, reenable IRQ*/
7321 }
7322 
7323 /**
7324  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7325  * @state: The atomic state to commit
7326  *
7327  * This will tell DC to commit the constructed DC state from atomic_check,
7328  * programming the hardware. Any failures here implies a hardware failure, since
7329  * atomic check should have filtered anything non-kosher.
7330  */
7331 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7332 {
7333 	struct drm_device *dev = state->dev;
7334 	struct amdgpu_device *adev = dev->dev_private;
7335 	struct amdgpu_display_manager *dm = &adev->dm;
7336 	struct dm_atomic_state *dm_state;
7337 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7338 	uint32_t i, j;
7339 	struct drm_crtc *crtc;
7340 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7341 	unsigned long flags;
7342 	bool wait_for_vblank = true;
7343 	struct drm_connector *connector;
7344 	struct drm_connector_state *old_con_state, *new_con_state;
7345 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7346 	int crtc_disable_count = 0;
7347 
7348 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7349 
7350 	dm_state = dm_atomic_get_new_state(state);
7351 	if (dm_state && dm_state->context) {
7352 		dc_state = dm_state->context;
7353 	} else {
7354 		/* No state changes, retain current state. */
7355 		dc_state_temp = dc_create_state(dm->dc);
7356 		ASSERT(dc_state_temp);
7357 		dc_state = dc_state_temp;
7358 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7359 	}
7360 
7361 	/* update changed items */
7362 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7363 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7364 
7365 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7366 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7367 
7368 		DRM_DEBUG_DRIVER(
7369 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7370 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7371 			"connectors_changed:%d\n",
7372 			acrtc->crtc_id,
7373 			new_crtc_state->enable,
7374 			new_crtc_state->active,
7375 			new_crtc_state->planes_changed,
7376 			new_crtc_state->mode_changed,
7377 			new_crtc_state->active_changed,
7378 			new_crtc_state->connectors_changed);
7379 
7380 		/* Copy all transient state flags into dc state */
7381 		if (dm_new_crtc_state->stream) {
7382 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7383 							    dm_new_crtc_state->stream);
7384 		}
7385 
7386 		/* handles headless hotplug case, updating new_state and
7387 		 * aconnector as needed
7388 		 */
7389 
7390 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7391 
7392 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7393 
7394 			if (!dm_new_crtc_state->stream) {
7395 				/*
7396 				 * this could happen because of issues with
7397 				 * userspace notifications delivery.
7398 				 * In this case userspace tries to set mode on
7399 				 * display which is disconnected in fact.
7400 				 * dc_sink is NULL in this case on aconnector.
7401 				 * We expect reset mode will come soon.
7402 				 *
7403 				 * This can also happen when unplug is done
7404 				 * during resume sequence ended
7405 				 *
7406 				 * In this case, we want to pretend we still
7407 				 * have a sink to keep the pipe running so that
7408 				 * hw state is consistent with the sw state
7409 				 */
7410 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7411 						__func__, acrtc->base.base.id);
7412 				continue;
7413 			}
7414 
7415 			if (dm_old_crtc_state->stream)
7416 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7417 
7418 			pm_runtime_get_noresume(dev->dev);
7419 
7420 			acrtc->enabled = true;
7421 			acrtc->hw_mode = new_crtc_state->mode;
7422 			crtc->hwmode = new_crtc_state->mode;
7423 		} else if (modereset_required(new_crtc_state)) {
7424 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7425 			/* i.e. reset mode */
7426 			if (dm_old_crtc_state->stream) {
7427 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7428 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7429 
7430 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7431 			}
7432 		}
7433 	} /* for_each_crtc_in_state() */
7434 
7435 	if (dc_state) {
7436 		dm_enable_per_frame_crtc_master_sync(dc_state);
7437 		mutex_lock(&dm->dc_lock);
7438 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7439 		mutex_unlock(&dm->dc_lock);
7440 	}
7441 
7442 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7443 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7444 
7445 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7446 
7447 		if (dm_new_crtc_state->stream != NULL) {
7448 			const struct dc_stream_status *status =
7449 					dc_stream_get_status(dm_new_crtc_state->stream);
7450 
7451 			if (!status)
7452 				status = dc_stream_get_status_from_state(dc_state,
7453 									 dm_new_crtc_state->stream);
7454 
7455 			if (!status)
7456 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7457 			else
7458 				acrtc->otg_inst = status->primary_otg_inst;
7459 		}
7460 	}
7461 #ifdef CONFIG_DRM_AMD_DC_HDCP
7462 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7463 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7464 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7465 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7466 
7467 		new_crtc_state = NULL;
7468 
7469 		if (acrtc)
7470 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7471 
7472 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7473 
7474 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7475 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7476 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7477 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7478 			continue;
7479 		}
7480 
7481 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7482 			hdcp_update_display(
7483 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7484 				new_con_state->hdcp_content_type,
7485 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7486 													 : false);
7487 	}
7488 #endif
7489 
7490 	/* Handle connector state changes */
7491 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7492 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7493 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7494 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7495 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7496 		struct dc_stream_update stream_update;
7497 		struct dc_info_packet hdr_packet;
7498 		struct dc_stream_status *status = NULL;
7499 		bool abm_changed, hdr_changed, scaling_changed;
7500 
7501 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7502 		memset(&stream_update, 0, sizeof(stream_update));
7503 
7504 		if (acrtc) {
7505 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7506 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7507 		}
7508 
7509 		/* Skip any modesets/resets */
7510 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7511 			continue;
7512 
7513 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7514 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7515 
7516 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7517 							     dm_old_con_state);
7518 
7519 		abm_changed = dm_new_crtc_state->abm_level !=
7520 			      dm_old_crtc_state->abm_level;
7521 
7522 		hdr_changed =
7523 			is_hdr_metadata_different(old_con_state, new_con_state);
7524 
7525 		if (!scaling_changed && !abm_changed && !hdr_changed)
7526 			continue;
7527 
7528 		stream_update.stream = dm_new_crtc_state->stream;
7529 		if (scaling_changed) {
7530 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7531 					dm_new_con_state, dm_new_crtc_state->stream);
7532 
7533 			stream_update.src = dm_new_crtc_state->stream->src;
7534 			stream_update.dst = dm_new_crtc_state->stream->dst;
7535 		}
7536 
7537 		if (abm_changed) {
7538 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7539 
7540 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7541 		}
7542 
7543 		if (hdr_changed) {
7544 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7545 			stream_update.hdr_static_metadata = &hdr_packet;
7546 		}
7547 
7548 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7549 		WARN_ON(!status);
7550 		WARN_ON(!status->plane_count);
7551 
7552 		/*
7553 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7554 		 * Here we create an empty update on each plane.
7555 		 * To fix this, DC should permit updating only stream properties.
7556 		 */
7557 		for (j = 0; j < status->plane_count; j++)
7558 			dummy_updates[j].surface = status->plane_states[0];
7559 
7560 
7561 		mutex_lock(&dm->dc_lock);
7562 		dc_commit_updates_for_stream(dm->dc,
7563 						     dummy_updates,
7564 						     status->plane_count,
7565 						     dm_new_crtc_state->stream,
7566 						     &stream_update,
7567 						     dc_state);
7568 		mutex_unlock(&dm->dc_lock);
7569 	}
7570 
7571 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7572 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7573 				      new_crtc_state, i) {
7574 		if (old_crtc_state->active && !new_crtc_state->active)
7575 			crtc_disable_count++;
7576 
7577 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7578 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7579 
7580 		/* Update freesync active state. */
7581 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7582 
7583 		/* Handle vrr on->off / off->on transitions */
7584 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7585 						dm_new_crtc_state);
7586 	}
7587 
7588 	/**
7589 	 * Enable interrupts for CRTCs that are newly enabled or went through
7590 	 * a modeset. It was intentionally deferred until after the front end
7591 	 * state was modified to wait until the OTG was on and so the IRQ
7592 	 * handlers didn't access stale or invalid state.
7593 	 */
7594 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7595 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7596 
7597 		if (new_crtc_state->active &&
7598 		    (!old_crtc_state->active ||
7599 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7600 			manage_dm_interrupts(adev, acrtc, true);
7601 #ifdef CONFIG_DEBUG_FS
7602 			/**
7603 			 * Frontend may have changed so reapply the CRC capture
7604 			 * settings for the stream.
7605 			 */
7606 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7607 
7608 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7609 				amdgpu_dm_crtc_configure_crc_source(
7610 					crtc, dm_new_crtc_state,
7611 					dm_new_crtc_state->crc_src);
7612 			}
7613 #endif
7614 		}
7615 	}
7616 
7617 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7618 		if (new_crtc_state->async_flip)
7619 			wait_for_vblank = false;
7620 
7621 	/* update planes when needed per crtc*/
7622 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7623 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7624 
7625 		if (dm_new_crtc_state->stream)
7626 			amdgpu_dm_commit_planes(state, dc_state, dev,
7627 						dm, crtc, wait_for_vblank);
7628 	}
7629 
7630 	/* Update audio instances for each connector. */
7631 	amdgpu_dm_commit_audio(dev, state);
7632 
7633 	/*
7634 	 * send vblank event on all events not handled in flip and
7635 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7636 	 */
7637 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7638 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7639 
7640 		if (new_crtc_state->event)
7641 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7642 
7643 		new_crtc_state->event = NULL;
7644 	}
7645 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7646 
7647 	/* Signal HW programming completion */
7648 	drm_atomic_helper_commit_hw_done(state);
7649 
7650 	if (wait_for_vblank)
7651 		drm_atomic_helper_wait_for_flip_done(dev, state);
7652 
7653 	drm_atomic_helper_cleanup_planes(dev, state);
7654 
7655 	/*
7656 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7657 	 * so we can put the GPU into runtime suspend if we're not driving any
7658 	 * displays anymore
7659 	 */
7660 	for (i = 0; i < crtc_disable_count; i++)
7661 		pm_runtime_put_autosuspend(dev->dev);
7662 	pm_runtime_mark_last_busy(dev->dev);
7663 
7664 	if (dc_state_temp)
7665 		dc_release_state(dc_state_temp);
7666 }
7667 
7668 
7669 static int dm_force_atomic_commit(struct drm_connector *connector)
7670 {
7671 	int ret = 0;
7672 	struct drm_device *ddev = connector->dev;
7673 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7674 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7675 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7676 	struct drm_connector_state *conn_state;
7677 	struct drm_crtc_state *crtc_state;
7678 	struct drm_plane_state *plane_state;
7679 
7680 	if (!state)
7681 		return -ENOMEM;
7682 
7683 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7684 
7685 	/* Construct an atomic state to restore previous display setting */
7686 
7687 	/*
7688 	 * Attach connectors to drm_atomic_state
7689 	 */
7690 	conn_state = drm_atomic_get_connector_state(state, connector);
7691 
7692 	ret = PTR_ERR_OR_ZERO(conn_state);
7693 	if (ret)
7694 		goto err;
7695 
7696 	/* Attach crtc to drm_atomic_state*/
7697 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7698 
7699 	ret = PTR_ERR_OR_ZERO(crtc_state);
7700 	if (ret)
7701 		goto err;
7702 
7703 	/* force a restore */
7704 	crtc_state->mode_changed = true;
7705 
7706 	/* Attach plane to drm_atomic_state */
7707 	plane_state = drm_atomic_get_plane_state(state, plane);
7708 
7709 	ret = PTR_ERR_OR_ZERO(plane_state);
7710 	if (ret)
7711 		goto err;
7712 
7713 
7714 	/* Call commit internally with the state we just constructed */
7715 	ret = drm_atomic_commit(state);
7716 	if (!ret)
7717 		return 0;
7718 
7719 err:
7720 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7721 	drm_atomic_state_put(state);
7722 
7723 	return ret;
7724 }
7725 
7726 /*
7727  * This function handles all cases when set mode does not come upon hotplug.
7728  * This includes when a display is unplugged then plugged back into the
7729  * same port and when running without usermode desktop manager supprot
7730  */
7731 void dm_restore_drm_connector_state(struct drm_device *dev,
7732 				    struct drm_connector *connector)
7733 {
7734 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7735 	struct amdgpu_crtc *disconnected_acrtc;
7736 	struct dm_crtc_state *acrtc_state;
7737 
7738 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7739 		return;
7740 
7741 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7742 	if (!disconnected_acrtc)
7743 		return;
7744 
7745 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7746 	if (!acrtc_state->stream)
7747 		return;
7748 
7749 	/*
7750 	 * If the previous sink is not released and different from the current,
7751 	 * we deduce we are in a state where we can not rely on usermode call
7752 	 * to turn on the display, so we do it here
7753 	 */
7754 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7755 		dm_force_atomic_commit(&aconnector->base);
7756 }
7757 
7758 /*
7759  * Grabs all modesetting locks to serialize against any blocking commits,
7760  * Waits for completion of all non blocking commits.
7761  */
7762 static int do_aquire_global_lock(struct drm_device *dev,
7763 				 struct drm_atomic_state *state)
7764 {
7765 	struct drm_crtc *crtc;
7766 	struct drm_crtc_commit *commit;
7767 	long ret;
7768 
7769 	/*
7770 	 * Adding all modeset locks to aquire_ctx will
7771 	 * ensure that when the framework release it the
7772 	 * extra locks we are locking here will get released to
7773 	 */
7774 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7775 	if (ret)
7776 		return ret;
7777 
7778 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7779 		spin_lock(&crtc->commit_lock);
7780 		commit = list_first_entry_or_null(&crtc->commit_list,
7781 				struct drm_crtc_commit, commit_entry);
7782 		if (commit)
7783 			drm_crtc_commit_get(commit);
7784 		spin_unlock(&crtc->commit_lock);
7785 
7786 		if (!commit)
7787 			continue;
7788 
7789 		/*
7790 		 * Make sure all pending HW programming completed and
7791 		 * page flips done
7792 		 */
7793 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7794 
7795 		if (ret > 0)
7796 			ret = wait_for_completion_interruptible_timeout(
7797 					&commit->flip_done, 10*HZ);
7798 
7799 		if (ret == 0)
7800 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7801 				  "timed out\n", crtc->base.id, crtc->name);
7802 
7803 		drm_crtc_commit_put(commit);
7804 	}
7805 
7806 	return ret < 0 ? ret : 0;
7807 }
7808 
7809 static void get_freesync_config_for_crtc(
7810 	struct dm_crtc_state *new_crtc_state,
7811 	struct dm_connector_state *new_con_state)
7812 {
7813 	struct mod_freesync_config config = {0};
7814 	struct amdgpu_dm_connector *aconnector =
7815 			to_amdgpu_dm_connector(new_con_state->base.connector);
7816 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7817 	int vrefresh = drm_mode_vrefresh(mode);
7818 
7819 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7820 					vrefresh >= aconnector->min_vfreq &&
7821 					vrefresh <= aconnector->max_vfreq;
7822 
7823 	if (new_crtc_state->vrr_supported) {
7824 		new_crtc_state->stream->ignore_msa_timing_param = true;
7825 		config.state = new_crtc_state->base.vrr_enabled ?
7826 				VRR_STATE_ACTIVE_VARIABLE :
7827 				VRR_STATE_INACTIVE;
7828 		config.min_refresh_in_uhz =
7829 				aconnector->min_vfreq * 1000000;
7830 		config.max_refresh_in_uhz =
7831 				aconnector->max_vfreq * 1000000;
7832 		config.vsif_supported = true;
7833 		config.btr = true;
7834 	}
7835 
7836 	new_crtc_state->freesync_config = config;
7837 }
7838 
7839 static void reset_freesync_config_for_crtc(
7840 	struct dm_crtc_state *new_crtc_state)
7841 {
7842 	new_crtc_state->vrr_supported = false;
7843 
7844 	memset(&new_crtc_state->vrr_params, 0,
7845 	       sizeof(new_crtc_state->vrr_params));
7846 	memset(&new_crtc_state->vrr_infopacket, 0,
7847 	       sizeof(new_crtc_state->vrr_infopacket));
7848 }
7849 
7850 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7851 				struct drm_atomic_state *state,
7852 				struct drm_crtc *crtc,
7853 				struct drm_crtc_state *old_crtc_state,
7854 				struct drm_crtc_state *new_crtc_state,
7855 				bool enable,
7856 				bool *lock_and_validation_needed)
7857 {
7858 	struct dm_atomic_state *dm_state = NULL;
7859 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7860 	struct dc_stream_state *new_stream;
7861 	int ret = 0;
7862 
7863 	/*
7864 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7865 	 * update changed items
7866 	 */
7867 	struct amdgpu_crtc *acrtc = NULL;
7868 	struct amdgpu_dm_connector *aconnector = NULL;
7869 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7870 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7871 
7872 	new_stream = NULL;
7873 
7874 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7875 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7876 	acrtc = to_amdgpu_crtc(crtc);
7877 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7878 
7879 	/* TODO This hack should go away */
7880 	if (aconnector && enable) {
7881 		/* Make sure fake sink is created in plug-in scenario */
7882 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7883 							    &aconnector->base);
7884 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7885 							    &aconnector->base);
7886 
7887 		if (IS_ERR(drm_new_conn_state)) {
7888 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7889 			goto fail;
7890 		}
7891 
7892 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7893 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7894 
7895 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7896 			goto skip_modeset;
7897 
7898 		new_stream = create_validate_stream_for_sink(aconnector,
7899 							     &new_crtc_state->mode,
7900 							     dm_new_conn_state,
7901 							     dm_old_crtc_state->stream);
7902 
7903 		/*
7904 		 * we can have no stream on ACTION_SET if a display
7905 		 * was disconnected during S3, in this case it is not an
7906 		 * error, the OS will be updated after detection, and
7907 		 * will do the right thing on next atomic commit
7908 		 */
7909 
7910 		if (!new_stream) {
7911 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7912 					__func__, acrtc->base.base.id);
7913 			ret = -ENOMEM;
7914 			goto fail;
7915 		}
7916 
7917 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7918 
7919 		ret = fill_hdr_info_packet(drm_new_conn_state,
7920 					   &new_stream->hdr_static_metadata);
7921 		if (ret)
7922 			goto fail;
7923 
7924 		/*
7925 		 * If we already removed the old stream from the context
7926 		 * (and set the new stream to NULL) then we can't reuse
7927 		 * the old stream even if the stream and scaling are unchanged.
7928 		 * We'll hit the BUG_ON and black screen.
7929 		 *
7930 		 * TODO: Refactor this function to allow this check to work
7931 		 * in all conditions.
7932 		 */
7933 		if (dm_new_crtc_state->stream &&
7934 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7935 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7936 			new_crtc_state->mode_changed = false;
7937 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7938 					 new_crtc_state->mode_changed);
7939 		}
7940 	}
7941 
7942 	/* mode_changed flag may get updated above, need to check again */
7943 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7944 		goto skip_modeset;
7945 
7946 	DRM_DEBUG_DRIVER(
7947 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7948 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7949 		"connectors_changed:%d\n",
7950 		acrtc->crtc_id,
7951 		new_crtc_state->enable,
7952 		new_crtc_state->active,
7953 		new_crtc_state->planes_changed,
7954 		new_crtc_state->mode_changed,
7955 		new_crtc_state->active_changed,
7956 		new_crtc_state->connectors_changed);
7957 
7958 	/* Remove stream for any changed/disabled CRTC */
7959 	if (!enable) {
7960 
7961 		if (!dm_old_crtc_state->stream)
7962 			goto skip_modeset;
7963 
7964 		ret = dm_atomic_get_state(state, &dm_state);
7965 		if (ret)
7966 			goto fail;
7967 
7968 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7969 				crtc->base.id);
7970 
7971 		/* i.e. reset mode */
7972 		if (dc_remove_stream_from_ctx(
7973 				dm->dc,
7974 				dm_state->context,
7975 				dm_old_crtc_state->stream) != DC_OK) {
7976 			ret = -EINVAL;
7977 			goto fail;
7978 		}
7979 
7980 		dc_stream_release(dm_old_crtc_state->stream);
7981 		dm_new_crtc_state->stream = NULL;
7982 
7983 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7984 
7985 		*lock_and_validation_needed = true;
7986 
7987 	} else {/* Add stream for any updated/enabled CRTC */
7988 		/*
7989 		 * Quick fix to prevent NULL pointer on new_stream when
7990 		 * added MST connectors not found in existing crtc_state in the chained mode
7991 		 * TODO: need to dig out the root cause of that
7992 		 */
7993 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7994 			goto skip_modeset;
7995 
7996 		if (modereset_required(new_crtc_state))
7997 			goto skip_modeset;
7998 
7999 		if (modeset_required(new_crtc_state, new_stream,
8000 				     dm_old_crtc_state->stream)) {
8001 
8002 			WARN_ON(dm_new_crtc_state->stream);
8003 
8004 			ret = dm_atomic_get_state(state, &dm_state);
8005 			if (ret)
8006 				goto fail;
8007 
8008 			dm_new_crtc_state->stream = new_stream;
8009 
8010 			dc_stream_retain(new_stream);
8011 
8012 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8013 						crtc->base.id);
8014 
8015 			if (dc_add_stream_to_ctx(
8016 					dm->dc,
8017 					dm_state->context,
8018 					dm_new_crtc_state->stream) != DC_OK) {
8019 				ret = -EINVAL;
8020 				goto fail;
8021 			}
8022 
8023 			*lock_and_validation_needed = true;
8024 		}
8025 	}
8026 
8027 skip_modeset:
8028 	/* Release extra reference */
8029 	if (new_stream)
8030 		 dc_stream_release(new_stream);
8031 
8032 	/*
8033 	 * We want to do dc stream updates that do not require a
8034 	 * full modeset below.
8035 	 */
8036 	if (!(enable && aconnector && new_crtc_state->enable &&
8037 	      new_crtc_state->active))
8038 		return 0;
8039 	/*
8040 	 * Given above conditions, the dc state cannot be NULL because:
8041 	 * 1. We're in the process of enabling CRTCs (just been added
8042 	 *    to the dc context, or already is on the context)
8043 	 * 2. Has a valid connector attached, and
8044 	 * 3. Is currently active and enabled.
8045 	 * => The dc stream state currently exists.
8046 	 */
8047 	BUG_ON(dm_new_crtc_state->stream == NULL);
8048 
8049 	/* Scaling or underscan settings */
8050 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8051 		update_stream_scaling_settings(
8052 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8053 
8054 	/* ABM settings */
8055 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8056 
8057 	/*
8058 	 * Color management settings. We also update color properties
8059 	 * when a modeset is needed, to ensure it gets reprogrammed.
8060 	 */
8061 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8062 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8063 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8064 		if (ret)
8065 			goto fail;
8066 	}
8067 
8068 	/* Update Freesync settings. */
8069 	get_freesync_config_for_crtc(dm_new_crtc_state,
8070 				     dm_new_conn_state);
8071 
8072 	return ret;
8073 
8074 fail:
8075 	if (new_stream)
8076 		dc_stream_release(new_stream);
8077 	return ret;
8078 }
8079 
8080 static bool should_reset_plane(struct drm_atomic_state *state,
8081 			       struct drm_plane *plane,
8082 			       struct drm_plane_state *old_plane_state,
8083 			       struct drm_plane_state *new_plane_state)
8084 {
8085 	struct drm_plane *other;
8086 	struct drm_plane_state *old_other_state, *new_other_state;
8087 	struct drm_crtc_state *new_crtc_state;
8088 	int i;
8089 
8090 	/*
8091 	 * TODO: Remove this hack once the checks below are sufficient
8092 	 * enough to determine when we need to reset all the planes on
8093 	 * the stream.
8094 	 */
8095 	if (state->allow_modeset)
8096 		return true;
8097 
8098 	/* Exit early if we know that we're adding or removing the plane. */
8099 	if (old_plane_state->crtc != new_plane_state->crtc)
8100 		return true;
8101 
8102 	/* old crtc == new_crtc == NULL, plane not in context. */
8103 	if (!new_plane_state->crtc)
8104 		return false;
8105 
8106 	new_crtc_state =
8107 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8108 
8109 	if (!new_crtc_state)
8110 		return true;
8111 
8112 	/* CRTC Degamma changes currently require us to recreate planes. */
8113 	if (new_crtc_state->color_mgmt_changed)
8114 		return true;
8115 
8116 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8117 		return true;
8118 
8119 	/*
8120 	 * If there are any new primary or overlay planes being added or
8121 	 * removed then the z-order can potentially change. To ensure
8122 	 * correct z-order and pipe acquisition the current DC architecture
8123 	 * requires us to remove and recreate all existing planes.
8124 	 *
8125 	 * TODO: Come up with a more elegant solution for this.
8126 	 */
8127 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8128 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8129 			continue;
8130 
8131 		if (old_other_state->crtc != new_plane_state->crtc &&
8132 		    new_other_state->crtc != new_plane_state->crtc)
8133 			continue;
8134 
8135 		if (old_other_state->crtc != new_other_state->crtc)
8136 			return true;
8137 
8138 		/* TODO: Remove this once we can handle fast format changes. */
8139 		if (old_other_state->fb && new_other_state->fb &&
8140 		    old_other_state->fb->format != new_other_state->fb->format)
8141 			return true;
8142 	}
8143 
8144 	return false;
8145 }
8146 
8147 static int dm_update_plane_state(struct dc *dc,
8148 				 struct drm_atomic_state *state,
8149 				 struct drm_plane *plane,
8150 				 struct drm_plane_state *old_plane_state,
8151 				 struct drm_plane_state *new_plane_state,
8152 				 bool enable,
8153 				 bool *lock_and_validation_needed)
8154 {
8155 
8156 	struct dm_atomic_state *dm_state = NULL;
8157 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8158 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8159 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8160 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8161 	struct amdgpu_crtc *new_acrtc;
8162 	bool needs_reset;
8163 	int ret = 0;
8164 
8165 
8166 	new_plane_crtc = new_plane_state->crtc;
8167 	old_plane_crtc = old_plane_state->crtc;
8168 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8169 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8170 
8171 	/*TODO Implement better atomic check for cursor plane */
8172 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8173 		if (!enable || !new_plane_crtc ||
8174 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8175 			return 0;
8176 
8177 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8178 
8179 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8180 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8181 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8182 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8183 			return -EINVAL;
8184 		}
8185 
8186 		return 0;
8187 	}
8188 
8189 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8190 					 new_plane_state);
8191 
8192 	/* Remove any changed/removed planes */
8193 	if (!enable) {
8194 		if (!needs_reset)
8195 			return 0;
8196 
8197 		if (!old_plane_crtc)
8198 			return 0;
8199 
8200 		old_crtc_state = drm_atomic_get_old_crtc_state(
8201 				state, old_plane_crtc);
8202 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8203 
8204 		if (!dm_old_crtc_state->stream)
8205 			return 0;
8206 
8207 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8208 				plane->base.id, old_plane_crtc->base.id);
8209 
8210 		ret = dm_atomic_get_state(state, &dm_state);
8211 		if (ret)
8212 			return ret;
8213 
8214 		if (!dc_remove_plane_from_context(
8215 				dc,
8216 				dm_old_crtc_state->stream,
8217 				dm_old_plane_state->dc_state,
8218 				dm_state->context)) {
8219 
8220 			ret = EINVAL;
8221 			return ret;
8222 		}
8223 
8224 
8225 		dc_plane_state_release(dm_old_plane_state->dc_state);
8226 		dm_new_plane_state->dc_state = NULL;
8227 
8228 		*lock_and_validation_needed = true;
8229 
8230 	} else { /* Add new planes */
8231 		struct dc_plane_state *dc_new_plane_state;
8232 
8233 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8234 			return 0;
8235 
8236 		if (!new_plane_crtc)
8237 			return 0;
8238 
8239 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8240 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8241 
8242 		if (!dm_new_crtc_state->stream)
8243 			return 0;
8244 
8245 		if (!needs_reset)
8246 			return 0;
8247 
8248 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8249 		if (ret)
8250 			return ret;
8251 
8252 		WARN_ON(dm_new_plane_state->dc_state);
8253 
8254 		dc_new_plane_state = dc_create_plane_state(dc);
8255 		if (!dc_new_plane_state)
8256 			return -ENOMEM;
8257 
8258 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8259 				plane->base.id, new_plane_crtc->base.id);
8260 
8261 		ret = fill_dc_plane_attributes(
8262 			new_plane_crtc->dev->dev_private,
8263 			dc_new_plane_state,
8264 			new_plane_state,
8265 			new_crtc_state);
8266 		if (ret) {
8267 			dc_plane_state_release(dc_new_plane_state);
8268 			return ret;
8269 		}
8270 
8271 		ret = dm_atomic_get_state(state, &dm_state);
8272 		if (ret) {
8273 			dc_plane_state_release(dc_new_plane_state);
8274 			return ret;
8275 		}
8276 
8277 		/*
8278 		 * Any atomic check errors that occur after this will
8279 		 * not need a release. The plane state will be attached
8280 		 * to the stream, and therefore part of the atomic
8281 		 * state. It'll be released when the atomic state is
8282 		 * cleaned.
8283 		 */
8284 		if (!dc_add_plane_to_context(
8285 				dc,
8286 				dm_new_crtc_state->stream,
8287 				dc_new_plane_state,
8288 				dm_state->context)) {
8289 
8290 			dc_plane_state_release(dc_new_plane_state);
8291 			return -EINVAL;
8292 		}
8293 
8294 		dm_new_plane_state->dc_state = dc_new_plane_state;
8295 
8296 		/* Tell DC to do a full surface update every time there
8297 		 * is a plane change. Inefficient, but works for now.
8298 		 */
8299 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8300 
8301 		*lock_and_validation_needed = true;
8302 	}
8303 
8304 
8305 	return ret;
8306 }
8307 
8308 static int
8309 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8310 				    struct drm_atomic_state *state,
8311 				    enum surface_update_type *out_type)
8312 {
8313 	struct dc *dc = dm->dc;
8314 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8315 	int i, j, num_plane, ret = 0;
8316 	struct drm_plane_state *old_plane_state, *new_plane_state;
8317 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8318 	struct drm_crtc *new_plane_crtc;
8319 	struct drm_plane *plane;
8320 
8321 	struct drm_crtc *crtc;
8322 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8323 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8324 	struct dc_stream_status *status = NULL;
8325 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8326 	struct surface_info_bundle {
8327 		struct dc_surface_update surface_updates[MAX_SURFACES];
8328 		struct dc_plane_info plane_infos[MAX_SURFACES];
8329 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8330 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8331 		struct dc_stream_update stream_update;
8332 	} *bundle;
8333 
8334 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8335 
8336 	if (!bundle) {
8337 		DRM_ERROR("Failed to allocate update bundle\n");
8338 		/* Set type to FULL to avoid crashing in DC*/
8339 		update_type = UPDATE_TYPE_FULL;
8340 		goto cleanup;
8341 	}
8342 
8343 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8344 
8345 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8346 
8347 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8348 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8349 		num_plane = 0;
8350 
8351 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8352 			update_type = UPDATE_TYPE_FULL;
8353 			goto cleanup;
8354 		}
8355 
8356 		if (!new_dm_crtc_state->stream)
8357 			continue;
8358 
8359 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8360 			const struct amdgpu_framebuffer *amdgpu_fb =
8361 				to_amdgpu_framebuffer(new_plane_state->fb);
8362 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8363 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8364 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8365 			uint64_t tiling_flags;
8366 			bool tmz_surface = false;
8367 
8368 			new_plane_crtc = new_plane_state->crtc;
8369 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8370 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8371 
8372 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8373 				continue;
8374 
8375 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8376 				update_type = UPDATE_TYPE_FULL;
8377 				goto cleanup;
8378 			}
8379 
8380 			if (crtc != new_plane_crtc)
8381 				continue;
8382 
8383 			bundle->surface_updates[num_plane].surface =
8384 					new_dm_plane_state->dc_state;
8385 
8386 			if (new_crtc_state->mode_changed) {
8387 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8388 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8389 			}
8390 
8391 			if (new_crtc_state->color_mgmt_changed) {
8392 				bundle->surface_updates[num_plane].gamma =
8393 						new_dm_plane_state->dc_state->gamma_correction;
8394 				bundle->surface_updates[num_plane].in_transfer_func =
8395 						new_dm_plane_state->dc_state->in_transfer_func;
8396 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8397 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8398 				bundle->stream_update.gamut_remap =
8399 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8400 				bundle->stream_update.output_csc_transform =
8401 						&new_dm_crtc_state->stream->csc_color_matrix;
8402 				bundle->stream_update.out_transfer_func =
8403 						new_dm_crtc_state->stream->out_transfer_func;
8404 			}
8405 
8406 			ret = fill_dc_scaling_info(new_plane_state,
8407 						   scaling_info);
8408 			if (ret)
8409 				goto cleanup;
8410 
8411 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8412 
8413 			if (amdgpu_fb) {
8414 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8415 				if (ret)
8416 					goto cleanup;
8417 
8418 				ret = fill_dc_plane_info_and_addr(
8419 					dm->adev, new_plane_state, tiling_flags,
8420 					plane_info,
8421 					&flip_addr->address, tmz_surface,
8422 					false);
8423 				if (ret)
8424 					goto cleanup;
8425 
8426 				bundle->surface_updates[num_plane].plane_info = plane_info;
8427 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8428 			}
8429 
8430 			num_plane++;
8431 		}
8432 
8433 		if (num_plane == 0)
8434 			continue;
8435 
8436 		ret = dm_atomic_get_state(state, &dm_state);
8437 		if (ret)
8438 			goto cleanup;
8439 
8440 		old_dm_state = dm_atomic_get_old_state(state);
8441 		if (!old_dm_state) {
8442 			ret = -EINVAL;
8443 			goto cleanup;
8444 		}
8445 
8446 		status = dc_stream_get_status_from_state(old_dm_state->context,
8447 							 new_dm_crtc_state->stream);
8448 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8449 		/*
8450 		 * TODO: DC modifies the surface during this call so we need
8451 		 * to lock here - find a way to do this without locking.
8452 		 */
8453 		mutex_lock(&dm->dc_lock);
8454 		update_type = dc_check_update_surfaces_for_stream(
8455 				dc,	bundle->surface_updates, num_plane,
8456 				&bundle->stream_update, status);
8457 		mutex_unlock(&dm->dc_lock);
8458 
8459 		if (update_type > UPDATE_TYPE_MED) {
8460 			update_type = UPDATE_TYPE_FULL;
8461 			goto cleanup;
8462 		}
8463 	}
8464 
8465 cleanup:
8466 	kfree(bundle);
8467 
8468 	*out_type = update_type;
8469 	return ret;
8470 }
8471 #if defined(CONFIG_DRM_AMD_DC_DCN)
8472 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8473 {
8474 	struct drm_connector *connector;
8475 	struct drm_connector_state *conn_state;
8476 	struct amdgpu_dm_connector *aconnector = NULL;
8477 	int i;
8478 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8479 		if (conn_state->crtc != crtc)
8480 			continue;
8481 
8482 		aconnector = to_amdgpu_dm_connector(connector);
8483 		if (!aconnector->port || !aconnector->mst_port)
8484 			aconnector = NULL;
8485 		else
8486 			break;
8487 	}
8488 
8489 	if (!aconnector)
8490 		return 0;
8491 
8492 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8493 }
8494 #endif
8495 
8496 /**
8497  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8498  * @dev: The DRM device
8499  * @state: The atomic state to commit
8500  *
8501  * Validate that the given atomic state is programmable by DC into hardware.
8502  * This involves constructing a &struct dc_state reflecting the new hardware
8503  * state we wish to commit, then querying DC to see if it is programmable. It's
8504  * important not to modify the existing DC state. Otherwise, atomic_check
8505  * may unexpectedly commit hardware changes.
8506  *
8507  * When validating the DC state, it's important that the right locks are
8508  * acquired. For full updates case which removes/adds/updates streams on one
8509  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8510  * that any such full update commit will wait for completion of any outstanding
8511  * flip using DRMs synchronization events. See
8512  * dm_determine_update_type_for_commit()
8513  *
8514  * Note that DM adds the affected connectors for all CRTCs in state, when that
8515  * might not seem necessary. This is because DC stream creation requires the
8516  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8517  * be possible but non-trivial - a possible TODO item.
8518  *
8519  * Return: -Error code if validation failed.
8520  */
8521 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8522 				  struct drm_atomic_state *state)
8523 {
8524 	struct amdgpu_device *adev = dev->dev_private;
8525 	struct dm_atomic_state *dm_state = NULL;
8526 	struct dc *dc = adev->dm.dc;
8527 	struct drm_connector *connector;
8528 	struct drm_connector_state *old_con_state, *new_con_state;
8529 	struct drm_crtc *crtc;
8530 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8531 	struct drm_plane *plane;
8532 	struct drm_plane_state *old_plane_state, *new_plane_state;
8533 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8534 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8535 	enum dc_status status;
8536 	int ret, i;
8537 
8538 	/*
8539 	 * This bool will be set for true for any modeset/reset
8540 	 * or plane update which implies non fast surface update.
8541 	 */
8542 	bool lock_and_validation_needed = false;
8543 
8544 	ret = drm_atomic_helper_check_modeset(dev, state);
8545 	if (ret)
8546 		goto fail;
8547 
8548 	/* Check connector changes */
8549 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8550 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8551 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8552 
8553 		/* Skip connectors that are disabled or part of modeset already. */
8554 		if (!old_con_state->crtc && !new_con_state->crtc)
8555 			continue;
8556 
8557 		if (!new_con_state->crtc)
8558 			continue;
8559 
8560 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8561 		if (IS_ERR(new_crtc_state)) {
8562 			ret = PTR_ERR(new_crtc_state);
8563 			goto fail;
8564 		}
8565 
8566 		if (dm_old_con_state->abm_level !=
8567 		    dm_new_con_state->abm_level)
8568 			new_crtc_state->connectors_changed = true;
8569 	}
8570 
8571 #if defined(CONFIG_DRM_AMD_DC_DCN)
8572 	if (adev->asic_type >= CHIP_NAVI10) {
8573 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8574 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8575 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8576 				if (ret)
8577 					goto fail;
8578 			}
8579 		}
8580 	}
8581 #endif
8582 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8583 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8584 		    !new_crtc_state->color_mgmt_changed &&
8585 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8586 			continue;
8587 
8588 		if (!new_crtc_state->enable)
8589 			continue;
8590 
8591 		ret = drm_atomic_add_affected_connectors(state, crtc);
8592 		if (ret)
8593 			return ret;
8594 
8595 		ret = drm_atomic_add_affected_planes(state, crtc);
8596 		if (ret)
8597 			goto fail;
8598 	}
8599 
8600 	/*
8601 	 * Add all primary and overlay planes on the CRTC to the state
8602 	 * whenever a plane is enabled to maintain correct z-ordering
8603 	 * and to enable fast surface updates.
8604 	 */
8605 	drm_for_each_crtc(crtc, dev) {
8606 		bool modified = false;
8607 
8608 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8609 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8610 				continue;
8611 
8612 			if (new_plane_state->crtc == crtc ||
8613 			    old_plane_state->crtc == crtc) {
8614 				modified = true;
8615 				break;
8616 			}
8617 		}
8618 
8619 		if (!modified)
8620 			continue;
8621 
8622 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8623 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8624 				continue;
8625 
8626 			new_plane_state =
8627 				drm_atomic_get_plane_state(state, plane);
8628 
8629 			if (IS_ERR(new_plane_state)) {
8630 				ret = PTR_ERR(new_plane_state);
8631 				goto fail;
8632 			}
8633 		}
8634 	}
8635 
8636 	/* Remove exiting planes if they are modified */
8637 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8638 		ret = dm_update_plane_state(dc, state, plane,
8639 					    old_plane_state,
8640 					    new_plane_state,
8641 					    false,
8642 					    &lock_and_validation_needed);
8643 		if (ret)
8644 			goto fail;
8645 	}
8646 
8647 	/* Disable all crtcs which require disable */
8648 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8649 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8650 					   old_crtc_state,
8651 					   new_crtc_state,
8652 					   false,
8653 					   &lock_and_validation_needed);
8654 		if (ret)
8655 			goto fail;
8656 	}
8657 
8658 	/* Enable all crtcs which require enable */
8659 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8660 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8661 					   old_crtc_state,
8662 					   new_crtc_state,
8663 					   true,
8664 					   &lock_and_validation_needed);
8665 		if (ret)
8666 			goto fail;
8667 	}
8668 
8669 	/* Add new/modified planes */
8670 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8671 		ret = dm_update_plane_state(dc, state, plane,
8672 					    old_plane_state,
8673 					    new_plane_state,
8674 					    true,
8675 					    &lock_and_validation_needed);
8676 		if (ret)
8677 			goto fail;
8678 	}
8679 
8680 	/* Run this here since we want to validate the streams we created */
8681 	ret = drm_atomic_helper_check_planes(dev, state);
8682 	if (ret)
8683 		goto fail;
8684 
8685 	if (state->legacy_cursor_update) {
8686 		/*
8687 		 * This is a fast cursor update coming from the plane update
8688 		 * helper, check if it can be done asynchronously for better
8689 		 * performance.
8690 		 */
8691 		state->async_update =
8692 			!drm_atomic_helper_async_check(dev, state);
8693 
8694 		/*
8695 		 * Skip the remaining global validation if this is an async
8696 		 * update. Cursor updates can be done without affecting
8697 		 * state or bandwidth calcs and this avoids the performance
8698 		 * penalty of locking the private state object and
8699 		 * allocating a new dc_state.
8700 		 */
8701 		if (state->async_update)
8702 			return 0;
8703 	}
8704 
8705 	/* Check scaling and underscan changes*/
8706 	/* TODO Removed scaling changes validation due to inability to commit
8707 	 * new stream into context w\o causing full reset. Need to
8708 	 * decide how to handle.
8709 	 */
8710 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8711 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8712 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8713 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8714 
8715 		/* Skip any modesets/resets */
8716 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8717 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8718 			continue;
8719 
8720 		/* Skip any thing not scale or underscan changes */
8721 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8722 			continue;
8723 
8724 		overall_update_type = UPDATE_TYPE_FULL;
8725 		lock_and_validation_needed = true;
8726 	}
8727 
8728 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8729 	if (ret)
8730 		goto fail;
8731 
8732 	if (overall_update_type < update_type)
8733 		overall_update_type = update_type;
8734 
8735 	/*
8736 	 * lock_and_validation_needed was an old way to determine if we need to set
8737 	 * the global lock. Leaving it in to check if we broke any corner cases
8738 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8739 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8740 	 */
8741 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8742 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8743 
8744 	if (overall_update_type > UPDATE_TYPE_FAST) {
8745 		ret = dm_atomic_get_state(state, &dm_state);
8746 		if (ret)
8747 			goto fail;
8748 
8749 		ret = do_aquire_global_lock(dev, state);
8750 		if (ret)
8751 			goto fail;
8752 
8753 #if defined(CONFIG_DRM_AMD_DC_DCN)
8754 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8755 			goto fail;
8756 
8757 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8758 		if (ret)
8759 			goto fail;
8760 #endif
8761 
8762 		/*
8763 		 * Perform validation of MST topology in the state:
8764 		 * We need to perform MST atomic check before calling
8765 		 * dc_validate_global_state(), or there is a chance
8766 		 * to get stuck in an infinite loop and hang eventually.
8767 		 */
8768 		ret = drm_dp_mst_atomic_check(state);
8769 		if (ret)
8770 			goto fail;
8771 		status = dc_validate_global_state(dc, dm_state->context, false);
8772 		if (status != DC_OK) {
8773 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8774 				       dc_status_to_str(status), status);
8775 			ret = -EINVAL;
8776 			goto fail;
8777 		}
8778 	} else {
8779 		/*
8780 		 * The commit is a fast update. Fast updates shouldn't change
8781 		 * the DC context, affect global validation, and can have their
8782 		 * commit work done in parallel with other commits not touching
8783 		 * the same resource. If we have a new DC context as part of
8784 		 * the DM atomic state from validation we need to free it and
8785 		 * retain the existing one instead.
8786 		 *
8787 		 * Furthermore, since the DM atomic state only contains the DC
8788 		 * context and can safely be annulled, we can free the state
8789 		 * and clear the associated private object now to free
8790 		 * some memory and avoid a possible use-after-free later.
8791 		 */
8792 
8793 		for (i = 0; i < state->num_private_objs; i++) {
8794 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8795 
8796 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8797 				int j = state->num_private_objs-1;
8798 
8799 				dm_atomic_destroy_state(obj,
8800 						state->private_objs[i].state);
8801 
8802 				/* If i is not at the end of the array then the
8803 				 * last element needs to be moved to where i was
8804 				 * before the array can safely be truncated.
8805 				 */
8806 				if (i != j)
8807 					state->private_objs[i] =
8808 						state->private_objs[j];
8809 
8810 				state->private_objs[j].ptr = NULL;
8811 				state->private_objs[j].state = NULL;
8812 				state->private_objs[j].old_state = NULL;
8813 				state->private_objs[j].new_state = NULL;
8814 
8815 				state->num_private_objs = j;
8816 				break;
8817 			}
8818 		}
8819 	}
8820 
8821 	/* Store the overall update type for use later in atomic check. */
8822 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8823 		struct dm_crtc_state *dm_new_crtc_state =
8824 			to_dm_crtc_state(new_crtc_state);
8825 
8826 		dm_new_crtc_state->update_type = (int)overall_update_type;
8827 	}
8828 
8829 	/* Must be success */
8830 	WARN_ON(ret);
8831 	return ret;
8832 
8833 fail:
8834 	if (ret == -EDEADLK)
8835 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8836 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8837 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8838 	else
8839 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8840 
8841 	return ret;
8842 }
8843 
8844 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8845 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8846 {
8847 	uint8_t dpcd_data;
8848 	bool capable = false;
8849 
8850 	if (amdgpu_dm_connector->dc_link &&
8851 		dm_helpers_dp_read_dpcd(
8852 				NULL,
8853 				amdgpu_dm_connector->dc_link,
8854 				DP_DOWN_STREAM_PORT_COUNT,
8855 				&dpcd_data,
8856 				sizeof(dpcd_data))) {
8857 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8858 	}
8859 
8860 	return capable;
8861 }
8862 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8863 					struct edid *edid)
8864 {
8865 	int i;
8866 	bool edid_check_required;
8867 	struct detailed_timing *timing;
8868 	struct detailed_non_pixel *data;
8869 	struct detailed_data_monitor_range *range;
8870 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8871 			to_amdgpu_dm_connector(connector);
8872 	struct dm_connector_state *dm_con_state = NULL;
8873 
8874 	struct drm_device *dev = connector->dev;
8875 	struct amdgpu_device *adev = dev->dev_private;
8876 	bool freesync_capable = false;
8877 
8878 	if (!connector->state) {
8879 		DRM_ERROR("%s - Connector has no state", __func__);
8880 		goto update;
8881 	}
8882 
8883 	if (!edid) {
8884 		dm_con_state = to_dm_connector_state(connector->state);
8885 
8886 		amdgpu_dm_connector->min_vfreq = 0;
8887 		amdgpu_dm_connector->max_vfreq = 0;
8888 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8889 
8890 		goto update;
8891 	}
8892 
8893 	dm_con_state = to_dm_connector_state(connector->state);
8894 
8895 	edid_check_required = false;
8896 	if (!amdgpu_dm_connector->dc_sink) {
8897 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8898 		goto update;
8899 	}
8900 	if (!adev->dm.freesync_module)
8901 		goto update;
8902 	/*
8903 	 * if edid non zero restrict freesync only for dp and edp
8904 	 */
8905 	if (edid) {
8906 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8907 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8908 			edid_check_required = is_dp_capable_without_timing_msa(
8909 						adev->dm.dc,
8910 						amdgpu_dm_connector);
8911 		}
8912 	}
8913 	if (edid_check_required == true && (edid->version > 1 ||
8914 	   (edid->version == 1 && edid->revision > 1))) {
8915 		for (i = 0; i < 4; i++) {
8916 
8917 			timing	= &edid->detailed_timings[i];
8918 			data	= &timing->data.other_data;
8919 			range	= &data->data.range;
8920 			/*
8921 			 * Check if monitor has continuous frequency mode
8922 			 */
8923 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8924 				continue;
8925 			/*
8926 			 * Check for flag range limits only. If flag == 1 then
8927 			 * no additional timing information provided.
8928 			 * Default GTF, GTF Secondary curve and CVT are not
8929 			 * supported
8930 			 */
8931 			if (range->flags != 1)
8932 				continue;
8933 
8934 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8935 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8936 			amdgpu_dm_connector->pixel_clock_mhz =
8937 				range->pixel_clock_mhz * 10;
8938 			break;
8939 		}
8940 
8941 		if (amdgpu_dm_connector->max_vfreq -
8942 		    amdgpu_dm_connector->min_vfreq > 10) {
8943 
8944 			freesync_capable = true;
8945 		}
8946 	}
8947 
8948 update:
8949 	if (dm_con_state)
8950 		dm_con_state->freesync_capable = freesync_capable;
8951 
8952 	if (connector->vrr_capable_property)
8953 		drm_connector_set_vrr_capable_property(connector,
8954 						       freesync_capable);
8955 }
8956 
8957 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8958 {
8959 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8960 
8961 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8962 		return;
8963 	if (link->type == dc_connection_none)
8964 		return;
8965 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8966 					dpcd_data, sizeof(dpcd_data))) {
8967 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8968 
8969 		if (dpcd_data[0] == 0) {
8970 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8971 			link->psr_settings.psr_feature_enabled = false;
8972 		} else {
8973 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8974 			link->psr_settings.psr_feature_enabled = true;
8975 		}
8976 
8977 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8978 	}
8979 }
8980 
8981 /*
8982  * amdgpu_dm_link_setup_psr() - configure psr link
8983  * @stream: stream state
8984  *
8985  * Return: true if success
8986  */
8987 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8988 {
8989 	struct dc_link *link = NULL;
8990 	struct psr_config psr_config = {0};
8991 	struct psr_context psr_context = {0};
8992 	bool ret = false;
8993 
8994 	if (stream == NULL)
8995 		return false;
8996 
8997 	link = stream->link;
8998 
8999 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9000 
9001 	if (psr_config.psr_version > 0) {
9002 		psr_config.psr_exit_link_training_required = 0x1;
9003 		psr_config.psr_frame_capture_indication_req = 0;
9004 		psr_config.psr_rfb_setup_time = 0x37;
9005 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9006 		psr_config.allow_smu_optimizations = 0x0;
9007 
9008 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9009 
9010 	}
9011 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9012 
9013 	return ret;
9014 }
9015 
9016 /*
9017  * amdgpu_dm_psr_enable() - enable psr f/w
9018  * @stream: stream state
9019  *
9020  * Return: true if success
9021  */
9022 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9023 {
9024 	struct dc_link *link = stream->link;
9025 	unsigned int vsync_rate_hz = 0;
9026 	struct dc_static_screen_params params = {0};
9027 	/* Calculate number of static frames before generating interrupt to
9028 	 * enter PSR.
9029 	 */
9030 	// Init fail safe of 2 frames static
9031 	unsigned int num_frames_static = 2;
9032 
9033 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9034 
9035 	vsync_rate_hz = div64_u64(div64_u64((
9036 			stream->timing.pix_clk_100hz * 100),
9037 			stream->timing.v_total),
9038 			stream->timing.h_total);
9039 
9040 	/* Round up
9041 	 * Calculate number of frames such that at least 30 ms of time has
9042 	 * passed.
9043 	 */
9044 	if (vsync_rate_hz != 0) {
9045 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9046 		num_frames_static = (30000 / frame_time_microsec) + 1;
9047 	}
9048 
9049 	params.triggers.cursor_update = true;
9050 	params.triggers.overlay_update = true;
9051 	params.triggers.surface_update = true;
9052 	params.num_frames = num_frames_static;
9053 
9054 	dc_stream_set_static_screen_params(link->ctx->dc,
9055 					   &stream, 1,
9056 					   &params);
9057 
9058 	return dc_link_set_psr_allow_active(link, true, false);
9059 }
9060 
9061 /*
9062  * amdgpu_dm_psr_disable() - disable psr f/w
9063  * @stream:  stream state
9064  *
9065  * Return: true if success
9066  */
9067 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9068 {
9069 
9070 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9071 
9072 	return dc_link_set_psr_allow_active(stream->link, false, true);
9073 }
9074