1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
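
/*
 * Layout of the DMUB firmware blob, as consumed below:
 *   [ucode array offset][PSP header][inst/const][PSP footer][bss/data]
 * Note that hdr->inst_const_bytes covers the PSP header and footer too.
 */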
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * initializes drm_device display related structures, based on the information
132  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
175 
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
184  * int disp_idx - [in] which CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];
	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc_state->stream);
}
208 
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
		if (acrtc_state->stream == NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
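		/*
		 * Pack the values in register layout: h_position in the
		 * high 16 bits of *position, v_position in the low 16 bits;
		 * vblank end in the high 16 bits of *vbl, vblank start in
		 * the low 16 bits.
		 */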
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
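		/* -1 marks an invalid/unassigned OTG instance; warn and
		 * fall back to the first CRTC.
		 */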
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: ignored
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
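	/* IRQ sources are laid out per OTG instance, so the instance is
	 * recovered by subtracting the first pageflip IRQ source.
	 */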
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
	WARN_ON(!e);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
		/* Update to the correct count and vblank timestamp if racing
		 * with the vblank irq. This also updates to the correct vblank
		 * timestamp even in VRR mode, as scanout is already past the
		 * front-porch at this point.
		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
	/* Keep track of the vblank of this flip for flip throttling. We use
	 * the cooked hw counter, as it is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
		/* Core vblank handling is done here after the end of the
		 * front-porch in vrr mode, as vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * also delivers any page-flip completion events that were
		 * queued to us if a pageflip happened inside the front-porch.
		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/**
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/**
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
505 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
static int dm_early_init(void *handle);
542 
/* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
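	/* Size the buffer for the largest mode at 4 bytes per pixel. */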
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
583 
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
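		/* Copy at most max_bytes; ret still reports the full ELD size. */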
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
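	/* The instruction/constant region starts after the PSP header within
	 * the ucode array; see the firmware layout note above.
	 */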
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will have loaded the fw_inst_const part
	 * of the dmub firmware to cw0; otherwise, the firmware is back-door
	 * loaded here by dm_dmub_hw_init.
	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
	if (amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
932 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR(
976 		"amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual used number of crtc */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
992 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
995 		goto error;
996 	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013 	}
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028 	if (adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
1042 	 * TODO: pageflip, vlank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1058 
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
	switch (adev->asic_type) {
1069 	case CHIP_BONAIRE:
1070 	case CHIP_HAWAII:
1071 	case CHIP_KAVERI:
1072 	case CHIP_KABINI:
1073 	case CHIP_MULLINS:
1074 	case CHIP_TONGA:
1075 	case CHIP_FIJI:
1076 	case CHIP_CARRIZO:
1077 	case CHIP_STONEY:
1078 	case CHIP_POLARIS11:
1079 	case CHIP_POLARIS10:
1080 	case CHIP_POLARIS12:
1081 	case CHIP_VEGAM:
1082 	case CHIP_VEGA10:
1083 	case CHIP_VEGA12:
1084 	case CHIP_VEGA20:
1085 	case CHIP_NAVI10:
1086 	case CHIP_NAVI14:
1087 	case CHIP_RENOIR:
1088 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1089 	case CHIP_SIENNA_CICHLID:
1090 	case CHIP_NAVY_FLOUNDER:
1091 #endif
1092 		return 0;
1093 	case CHIP_NAVI12:
1094 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1095 		break;
1096 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
1102 			return 0;
1103 		break;
1104 	default:
1105 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1106 		return -EINVAL;
1107 	}
1108 
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1111 		return 0;
1112 	}
1113 
1114 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1115 	if (r == -ENOENT) {
1116 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1117 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1118 		adev->dm.fw_dmcu = NULL;
1119 		return 0;
1120 	}
1121 	if (r) {
1122 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1123 			fw_name_dmcu);
1124 		return r;
1125 	}
1126 
1127 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1128 	if (r) {
1129 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1130 			fw_name_dmcu);
1131 		release_firmware(adev->dm.fw_dmcu);
1132 		adev->dm.fw_dmcu = NULL;
1133 		return r;
1134 	}
1135 
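	/* The DMCU image is split into an ERAM region and an interrupt-vector
	 * region; register both with the PSP loader.
	 */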
1136 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1137 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1138 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1139 	adev->firmware.fw_size +=
1140 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1141 
1142 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1144 	adev->firmware.fw_size +=
1145 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1146 
1147 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1148 
1149 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1150 
1151 	return 0;
1152 }
1153 
1154 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1155 {
1156 	struct amdgpu_device *adev = ctx;
1157 
1158 	return dm_read_reg(adev->dm.dc->ctx, address);
1159 }
1160 
1161 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1162 				     uint32_t value)
1163 {
1164 	struct amdgpu_device *adev = ctx;
1165 
1166 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1167 }
1168 
1169 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1170 {
1171 	struct dmub_srv_create_params create_params;
1172 	struct dmub_srv_region_params region_params;
1173 	struct dmub_srv_region_info region_info;
1174 	struct dmub_srv_fb_params fb_params;
1175 	struct dmub_srv_fb_info *fb_info;
1176 	struct dmub_srv *dmub_srv;
1177 	const struct dmcub_firmware_header_v1_0 *hdr;
1178 	const char *fw_name_dmub;
1179 	enum dmub_asic dmub_asic;
1180 	enum dmub_status status;
1181 	int r;
1182 
1183 	switch (adev->asic_type) {
1184 	case CHIP_RENOIR:
1185 		dmub_asic = DMUB_ASIC_DCN21;
1186 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1187 		break;
1188 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1189 	case CHIP_SIENNA_CICHLID:
1190 		dmub_asic = DMUB_ASIC_DCN30;
1191 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1192 		break;
1193 	case CHIP_NAVY_FLOUNDER:
1194 		dmub_asic = DMUB_ASIC_DCN30;
1195 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1196 		break;
1197 #endif
1198 
1199 	default:
1200 		/* ASIC doesn't support DMUB. */
1201 		return 0;
1202 	}
1203 
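	/* DMUB firmware is optional at this point: on load or validation
	 * failure, log the error and continue without DMUB support.
	 */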
1204 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1205 	if (r) {
1206 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1207 		return 0;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1211 	if (r) {
1212 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1213 		return 0;
1214 	}
1215 
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1231 
1232 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1233 	dmub_srv = adev->dm.dmub_srv;
1234 
1235 	if (!dmub_srv) {
1236 		DRM_ERROR("Failed to allocate DMUB service!\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(&create_params, 0, sizeof(create_params));
1241 	create_params.user_ctx = adev;
1242 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1243 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1244 	create_params.asic = dmub_asic;
1245 
1246 	/* Create the DMUB service. */
1247 	status = dmub_srv_create(dmub_srv, &create_params);
1248 	if (status != DMUB_STATUS_OK) {
1249 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Calculate the size of all the regions for the DMUB service. */
1254 	memset(&region_params, 0, sizeof(region_params));
1255 
1256 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1257 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1258 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1259 	region_params.vbios_size = adev->bios_size;
1260 	region_params.fw_bss_data = region_params.bss_data_size ?
1261 		adev->dm.dmub_fw->data +
1262 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1263 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1264 	region_params.fw_inst_const =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		PSP_HEADER_BYTES;
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	release_firmware(adev->dm.dmub_fw);
1338 	adev->dm.dmub_fw = NULL;
1339 
1340 	release_firmware(adev->dm.fw_dmcu);
1341 	adev->dm.fw_dmcu = NULL;
1342 
1343 	return 0;
1344 }
1345 
1346 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1347 {
1348 	struct amdgpu_dm_connector *aconnector;
1349 	struct drm_connector *connector;
1350 	struct drm_connector_list_iter iter;
1351 	int ret = 0;
1352 
1353 	drm_connector_list_iter_begin(dev, &iter);
1354 	drm_for_each_connector_iter(connector, &iter) {
1355 		aconnector = to_amdgpu_dm_connector(connector);
1356 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1357 		    aconnector->mst_mgr.aux) {
1358 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1359 					 aconnector,
1360 					 aconnector->base.base.id);
1361 
1362 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1363 			if (ret < 0) {
1364 				DRM_ERROR("DM_MST: Failed to start MST\n");
1365 				aconnector->dc_link->type =
1366 					dc_connection_single;
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	drm_connector_list_iter_end(&iter);
1372 
1373 	return ret;
1374 }
1375 
1376 static int dm_late_init(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	struct dmcu_iram_parameters params;
1381 	unsigned int linear_lut[16];
1382 	int i;
1383 	struct dmcu *dmcu = NULL;
1384 	bool ret = true;
1385 
1386 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1387 		return detect_mst_link_for_all_connectors(adev->ddev);
1388 
1389 	dmcu = adev->dm.dc->res_pool->dmcu;
1390 
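	/* A linear identity ramp: 16 evenly spaced points from 0 to 0xFFFF. */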
1391 	for (i = 0; i < 16; i++)
1392 		linear_lut[i] = 0xFFFF * i / 15;
1393 
1394 	params.set = 0;
1395 	params.backlight_ramping_start = 0xCCCC;
1396 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1397 	params.backlight_lut_array_size = 16;
1398 	params.backlight_lut_array = linear_lut;
1399 
	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1403 	params.min_abm_backlight = 0x28F;
1404 
	/* When ABM is implemented on the DMCUB (ABM 2.4 and up), the
	 * dmcu object will be NULL.
	 */
1409 	if (dmcu)
1410 		ret = dmcu_load_iram(dmcu, params);
1411 	else if (adev->dm.dc->ctx->dmub_srv)
1412 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1413 
1414 	if (!ret)
1415 		return -EINVAL;
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed;
	 * the settings should be passed to smu during boot up and on resume
	 * from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, and then calls the pplib
	 * functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also
	 * fixed values. dc has implemented a different flow for the Windows
	 * driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	ret = smu_write_watermarks_table(smu);
1502 	if (ret) {
1503 		DRM_ERROR("Failed to update WMTABLE!\n");
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 /**
1511  * dm_hw_init() - Initialize DC device
1512  * @handle: The base driver device containing the amdgpu_dm device.
1513  *
1514  * Initialize the &struct amdgpu_display_manager device. This involves calling
1515  * the initializers of each DM component, then populating the struct with them.
1516  *
1517  * Although the function implies hardware initialization, both hardware and
1518  * software are initialized here. Splitting them out to their relevant init
1519  * hooks is a future TODO item.
1520  *
1521  * Some notable things that are initialized here:
1522  *
1523  * - Display Core, both software and hardware
1524  * - DC modules that we need (freesync and color management)
1525  * - DRM software states
1526  * - Interrupt sources and handlers
1527  * - Vblank support
1528  * - Debug FS entries, if enabled
1529  */
1530 static int dm_hw_init(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 	/* Create DAL display manager */
1534 	amdgpu_dm_init(adev);
1535 	amdgpu_dm_hpd_init(adev);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * dm_hw_fini() - Teardown DC device
1542  * @handle: The base driver device containing the amdgpu_dm device.
1543  *
1544  * Teardown components within &struct amdgpu_display_manager that require
1545  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1546  * were loaded. Also flush IRQ workqueues and disable them.
1547  */
1548 static int dm_hw_fini(void *handle)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	amdgpu_dm_hpd_fini(adev);
1553 
1554 	amdgpu_dm_irq_fini(adev);
1555 	amdgpu_dm_fini(adev);
1556 	return 0;
1557 }
1558 
1559 
1560 static int dm_enable_vblank(struct drm_crtc *crtc);
1561 static void dm_disable_vblank(struct drm_crtc *crtc);
1562 
1563 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1564 				 struct dc_state *state, bool enable)
1565 {
1566 	enum dc_irq_source irq_source;
1567 	struct amdgpu_crtc *acrtc;
1568 	int rc = -EBUSY;
1569 	int i = 0;
1570 
1571 	for (i = 0; i < state->stream_count; i++) {
1572 		acrtc = get_crtc_by_otg_inst(
1573 				adev, state->stream_status[i].primary_otg_inst);
1574 
1575 		if (acrtc && state->stream_status[i].plane_count != 0) {
1576 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1577 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1578 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1579 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1580 			if (rc)
1581 				DRM_WARN("Failed to %s pflip interrupts\n",
1582 					 enable ? "enable" : "disable");
1583 
1584 			if (enable) {
1585 				rc = dm_enable_vblank(&acrtc->base);
1586 				if (rc)
1587 					DRM_WARN("Failed to enable vblank interrupts\n");
1588 			} else {
1589 				dm_disable_vblank(&acrtc->base);
1590 			}
1591 
1592 		}
1593 	}
1594 
1595 }
1596 
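/*
 * Commit a copy of the current context with all streams (and their planes)
 * removed; used to quiesce the display hardware around GPU reset.
 */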
1597 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1598 {
1599 	struct dc_state *context = NULL;
1600 	enum dc_status res = DC_ERROR_UNEXPECTED;
1601 	int i;
1602 	struct dc_stream_state *del_streams[MAX_PIPES];
1603 	int del_streams_count = 0;
1604 
1605 	memset(del_streams, 0, sizeof(del_streams));
1606 
1607 	context = dc_create_state(dc);
1608 	if (context == NULL)
1609 		goto context_alloc_fail;
1610 
1611 	dc_resource_state_copy_construct_current(dc, context);
1612 
1613 	/* First remove from context all streams */
1614 	for (i = 0; i < context->stream_count; i++) {
1615 		struct dc_stream_state *stream = context->streams[i];
1616 
1617 		del_streams[del_streams_count++] = stream;
1618 	}
1619 
1620 	/* Remove all planes for removed streams and then remove the streams */
1621 	for (i = 0; i < del_streams_count; i++) {
1622 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1623 			res = DC_FAIL_DETACH_SURFACES;
1624 			goto fail;
1625 		}
1626 
1627 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1628 		if (res != DC_OK)
1629 			goto fail;
1630 	}
1631 
1633 	res = dc_validate_global_state(dc, context, false);
1634 
1635 	if (res != DC_OK) {
1636 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1637 		goto fail;
1638 	}
1639 
1640 	res = dc_commit_state(dc, context);
1641 
1642 fail:
1643 	dc_release_state(context);
1644 
1645 context_alloc_fail:
1646 	return res;
1647 }
1648 
1649 static int dm_suspend(void *handle)
1650 {
1651 	struct amdgpu_device *adev = handle;
1652 	struct amdgpu_display_manager *dm = &adev->dm;
1653 	int ret = 0;
1654 
1655 	if (adev->in_gpu_reset) {
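		/* dc_lock is held across the GPU reset; it is released at
		 * the end of the reset path in dm_resume().
		 */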
1656 		mutex_lock(&dm->dc_lock);
1657 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1658 
1659 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1660 
1661 		amdgpu_dm_commit_zero_streams(dm->dc);
1662 
1663 		amdgpu_dm_irq_suspend(adev);
1664 
1665 		return ret;
1666 	}
1667 
1668 	WARN_ON(adev->dm.cached_state);
1669 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1670 
1671 	s3_handle_mst(adev->ddev, true);
1672 
1673 	amdgpu_dm_irq_suspend(adev);
1674 
1676 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct amdgpu_dm_connector *
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1683 					     struct drm_crtc *crtc)
1684 {
1685 	uint32_t i;
1686 	struct drm_connector_state *new_con_state;
1687 	struct drm_connector *connector;
1688 	struct drm_crtc *crtc_from_state;
1689 
1690 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1691 		crtc_from_state = new_con_state->crtc;
1692 
1693 		if (crtc_from_state == crtc)
1694 			return to_amdgpu_dm_connector(connector);
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
1700 static void emulated_link_detect(struct dc_link *link)
1701 {
1702 	struct dc_sink_init_data sink_init_data = { 0 };
1703 	struct display_sink_capability sink_caps = { 0 };
1704 	enum dc_edid_status edid_status;
1705 	struct dc_context *dc_ctx = link->ctx;
1706 	struct dc_sink *sink = NULL;
1707 	struct dc_sink *prev_sink = NULL;
1708 
1709 	link->type = dc_connection_none;
1710 	prev_sink = link->local_sink;
1711 
1712 	if (prev_sink != NULL)
1713 		dc_sink_retain(prev_sink);
1714 
1715 	switch (link->connector_signal) {
1716 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_LVDS: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_EDP: {
1741 		sink_caps.transaction_type =
1742 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1743 		sink_caps.signal = SIGNAL_TYPE_EDP;
1744 		break;
1745 	}
1746 
1747 	case SIGNAL_TYPE_DISPLAY_PORT: {
1748 		sink_caps.transaction_type =
1749 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
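		/* Emulated DP sinks are created with a virtual signal type. */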
1750 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1751 		break;
1752 	}
1753 
1754 	default:
1755 		DC_ERROR("Invalid connector type! signal:%d\n",
1756 			link->connector_signal);
1757 		return;
1758 	}
1759 
1760 	sink_init_data.link = link;
1761 	sink_init_data.sink_signal = sink_caps.signal;
1762 
1763 	sink = dc_sink_create(&sink_init_data);
1764 	if (!sink) {
1765 		DC_ERROR("Failed to create sink!\n");
1766 		return;
1767 	}
1768 
1769 	/* dc_sink_create returns a new reference */
1770 	link->local_sink = sink;
1771 
1772 	edid_status = dm_helpers_read_local_edid(
1773 			link->ctx,
1774 			link,
1775 			sink);
1776 
1777 	if (edid_status != EDID_OK)
1778 		DC_ERROR("Failed to read EDID");
1779 
1780 }
1781 
1782 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1783 				     struct amdgpu_display_manager *dm)
1784 {
1785 	struct {
1786 		struct dc_surface_update surface_updates[MAX_SURFACES];
1787 		struct dc_plane_info plane_infos[MAX_SURFACES];
1788 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1789 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1790 		struct dc_stream_update stream_update;
	} *bundle;
1792 	int k, m;
1793 
1794 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1795 
1796 	if (!bundle) {
1797 		dm_error("Failed to allocate update bundle\n");
1798 		goto cleanup;
1799 	}
1800 
1801 	for (k = 0; k < dc_state->stream_count; k++) {
1802 		bundle->stream_update.stream = dc_state->streams[k];
1803 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
1814 	}
1815 
cleanup:
	kfree(bundle);
1820 }
1821 
1822 static int dm_resume(void *handle)
1823 {
1824 	struct amdgpu_device *adev = handle;
1825 	struct drm_device *ddev = adev->ddev;
1826 	struct amdgpu_display_manager *dm = &adev->dm;
1827 	struct amdgpu_dm_connector *aconnector;
1828 	struct drm_connector *connector;
1829 	struct drm_connector_list_iter iter;
1830 	struct drm_crtc *crtc;
1831 	struct drm_crtc_state *new_crtc_state;
1832 	struct dm_crtc_state *dm_new_crtc_state;
1833 	struct drm_plane *plane;
1834 	struct drm_plane_state *new_plane_state;
1835 	struct dm_plane_state *dm_new_plane_state;
1836 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1837 	enum dc_connection_type new_connection_type = dc_connection_none;
1838 	struct dc_state *dc_state;
1839 	int i, r, j;
1840 
1841 	if (adev->in_gpu_reset) {
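		/* dm->dc_lock is still held from dm_suspend(); it is released below. */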
1842 		dc_state = dm->cached_dc_state;
1843 
1844 		r = dm_dmub_hw_init(adev);
1845 		if (r)
1846 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1847 
1848 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1849 		dc_resume(dm->dc);
1850 
1851 		amdgpu_dm_irq_resume_early(adev);
1852 
		for (i = 0; i < dc_state->stream_count; i++) {
			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
		}
1860 
1861 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1862 
1863 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1864 
1865 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1866 
1867 		dc_release_state(dm->cached_dc_state);
1868 		dm->cached_dc_state = NULL;
1869 
1870 		amdgpu_dm_irq_resume_late(adev);
1871 
1872 		mutex_unlock(&dm->dc_lock);
1873 
1874 		return 0;
1875 	}
1876 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 	dc_release_state(dm_state->context);
1878 	dm_state->context = dc_create_state(dm->dc);
1879 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 	dc_resource_state_construct(dm->dc, dm_state->context);
1881 
1882 	/* Before powering on DC we need to re-initialize DMUB. */
1883 	r = dm_dmub_hw_init(adev);
1884 	if (r)
1885 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1886 
1887 	/* power on hardware */
1888 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1889 
1890 	/* program HPD filter */
1891 	dc_resume(dm->dc);
1892 
1893 	/*
1894 	 * early enable HPD Rx IRQ, should be done before set mode as short
1895 	 * pulse interrupts are used for MST
1896 	 */
1897 	amdgpu_dm_irq_resume_early(adev);
1898 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/* Do detection */
1903 	drm_connector_list_iter_begin(ddev, &iter);
1904 	drm_for_each_connector_iter(connector, &iter) {
1905 		aconnector = to_amdgpu_dm_connector(connector);
1906 
1907 		/*
1908 		 * this is the case when traversing through already created
1909 		 * MST connectors, should be skipped
1910 		 */
1911 		if (aconnector->mst_port)
1912 			continue;
1913 
1914 		mutex_lock(&aconnector->hpd_lock);
1915 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1916 			DRM_ERROR("KMS: Failed to detect connector\n");
1917 
1918 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1919 			emulated_link_detect(aconnector->dc_link);
1920 		else
1921 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1922 
1923 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1924 			aconnector->fake_enable = false;
1925 
1926 		if (aconnector->dc_sink)
1927 			dc_sink_release(aconnector->dc_sink);
1928 		aconnector->dc_sink = NULL;
1929 		amdgpu_dm_update_connector_after_detect(aconnector);
1930 		mutex_unlock(&aconnector->hpd_lock);
1931 	}
1932 	drm_connector_list_iter_end(&iter);
1933 
1934 	/* Force mode set in atomic commit */
1935 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1936 		new_crtc_state->active_changed = true;
1937 
1938 	/*
1939 	 * atomic_check is expected to create the dc states. We need to release
1940 	 * them here, since they were duplicated as part of the suspend
1941 	 * procedure.
1942 	 */
1943 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1945 		if (dm_new_crtc_state->stream) {
1946 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1947 			dc_stream_release(dm_new_crtc_state->stream);
1948 			dm_new_crtc_state->stream = NULL;
1949 		}
1950 	}
1951 
1952 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1953 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1954 		if (dm_new_plane_state->dc_state) {
1955 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1956 			dc_plane_state_release(dm_new_plane_state->dc_state);
1957 			dm_new_plane_state->dc_state = NULL;
1958 		}
1959 	}
1960 
1961 	drm_atomic_helper_resume(ddev, dm->cached_state);
1962 
1963 	dm->cached_state = NULL;
1964 
1965 	amdgpu_dm_irq_resume_late(adev);
1966 
1967 	amdgpu_dm_smu_write_watermarks_table(adev);
1968 
1969 	return 0;
1970 }
1971 
1972 /**
1973  * DOC: DM Lifecycle
1974  *
1975  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1976  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1977  * the base driver's device list to be initialized and torn down accordingly.
1978  *
1979  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1980  */
1981 
1982 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1983 	.name = "dm",
1984 	.early_init = dm_early_init,
1985 	.late_init = dm_late_init,
1986 	.sw_init = dm_sw_init,
1987 	.sw_fini = dm_sw_fini,
1988 	.hw_init = dm_hw_init,
1989 	.hw_fini = dm_hw_fini,
1990 	.suspend = dm_suspend,
1991 	.resume = dm_resume,
1992 	.is_idle = dm_is_idle,
1993 	.wait_for_idle = dm_wait_for_idle,
1994 	.check_soft_reset = dm_check_soft_reset,
1995 	.soft_reset = dm_soft_reset,
1996 	.set_clockgating_state = dm_set_clockgating_state,
1997 	.set_powergating_state = dm_set_powergating_state,
1998 };
1999 
2000 const struct amdgpu_ip_block_version dm_ip_block =
2001 {
2002 	.type = AMD_IP_BLOCK_TYPE_DCE,
2003 	.major = 1,
2004 	.minor = 0,
2005 	.rev = 0,
2006 	.funcs = &amdgpu_dm_funcs,
2007 };
2008 
2009 
2010 /**
2011  * DOC: atomic
2012  *
2013  * *WIP*
2014  */
2015 
2016 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2017 	.fb_create = amdgpu_display_user_framebuffer_create,
2018 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2019 	.atomic_check = amdgpu_dm_atomic_check,
2020 	.atomic_commit = amdgpu_dm_atomic_commit,
2021 };
2022 
2023 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2024 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2025 };
2026 
2027 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2028 {
2029 	u32 max_cll, min_cll, max, min, q, r;
2030 	struct amdgpu_dm_backlight_caps *caps;
2031 	struct amdgpu_display_manager *dm;
2032 	struct drm_connector *conn_base;
2033 	struct amdgpu_device *adev;
2034 	struct dc_link *link = NULL;
2035 	static const u8 pre_computed_values[] = {
2036 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2037 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2038 
2039 	if (!aconnector || !aconnector->dc_link)
2040 		return;
2041 
2042 	link = aconnector->dc_link;
2043 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2044 		return;
2045 
2046 	conn_base = &aconnector->base;
2047 	adev = conn_base->dev->dev_private;
2048 	dm = &adev->dm;
2049 	caps = &dm->backlight_caps;
2050 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2051 	caps->aux_support = false;
2052 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2053 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2054 
2055 	if (caps->ext_caps->bits.oled == 1 ||
2056 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2057 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2058 		caps->aux_support = true;
2059 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we take advantage of CV being
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as CV = 32*q + r. Substituting CV in the
	 * luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
	 * to pre-compute the values of 50*2**(r/32). They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and are stored in pre_computed_values above.
	 */
2075 	q = max_cll >> 5;
2076 	r = max_cll % 32;
2077 	max = (1 << q) * pre_computed_values[r];
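	/*
	 * For example, max_cll = 65 gives q = 2, r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204, which
	 * matches 50 * 2**(65/32.0) ~= 204.4 from the formula above.
	 */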
2078 
2079 	// min luminance: maxLum * (CV/255)^2 / 100
2080 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2081 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
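	/*
	 * Note: for a one-byte min_cll, DIV_ROUND_CLOSEST(min_cll, 255) is
	 * 0 or 1, and DIV_ROUND_CLOSEST(q * q, 100) is then always 0, so
	 * this integer approximation effectively reports a minimum of 0.
	 */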
2082 
2083 	caps->aux_max_input_signal = max;
2084 	caps->aux_min_input_signal = min;
2085 }
2086 
2087 void amdgpu_dm_update_connector_after_detect(
2088 		struct amdgpu_dm_connector *aconnector)
2089 {
2090 	struct drm_connector *connector = &aconnector->base;
2091 	struct drm_device *dev = connector->dev;
2092 	struct dc_sink *sink;
2093 
	/* MST is handled by the drm_dp_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

2099 	sink = aconnector->dc_link->local_sink;
2100 	if (sink)
2101 		dc_sink_retain(sink);
2102 
2103 	/*
2104 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 	 * the connector sink is set to either fake or physical sink depends on link status.
2106 	 * Skip if already done during boot.
2107 	 */
2108 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 			&& aconnector->dc_em_sink) {
2110 
2111 		/*
2112 		 * For S3 resume with headless use eml_sink to fake stream
2113 		 * because on resume connector->sink is set to NULL
2114 		 */
2115 		mutex_lock(&dev->mode_config.mutex);
2116 
2117 		if (sink) {
2118 			if (aconnector->dc_sink) {
2119 				amdgpu_dm_update_freesync_caps(connector, NULL);
2120 				/*
2121 				 * retain and release below are used to
2122 				 * bump up refcount for sink because the link doesn't point
2123 				 * to it anymore after disconnect, so on next crtc to connector
2124 				 * reshuffle by UMD we will get into unwanted dc_sink release
2125 				 */
2126 				dc_sink_release(aconnector->dc_sink);
2127 			}
2128 			aconnector->dc_sink = sink;
2129 			dc_sink_retain(aconnector->dc_sink);
2130 			amdgpu_dm_update_freesync_caps(connector,
2131 					aconnector->edid);
2132 		} else {
2133 			amdgpu_dm_update_freesync_caps(connector, NULL);
2134 			if (!aconnector->dc_sink) {
2135 				aconnector->dc_sink = aconnector->dc_em_sink;
2136 				dc_sink_retain(aconnector->dc_sink);
2137 			}
2138 		}
2139 
2140 		mutex_unlock(&dev->mode_config.mutex);
2141 
2142 		if (sink)
2143 			dc_sink_release(sink);
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * TODO: temporary guard to look for proper fix
2149 	 * if this sink is MST sink, we should not do anything
2150 	 */
2151 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 		dc_sink_release(sink);
2153 		return;
2154 	}
2155 
2156 	if (aconnector->dc_sink == sink) {
2157 		/*
2158 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2159 		 * Do nothing!!
2160 		 */
2161 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 				aconnector->connector_id);
2163 		if (sink)
2164 			dc_sink_release(sink);
2165 		return;
2166 	}
2167 
2168 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 		aconnector->connector_id, aconnector->dc_sink, sink);
2170 
2171 	mutex_lock(&dev->mode_config.mutex);
2172 
2173 	/*
2174 	 * 1. Update status of the drm connector
2175 	 * 2. Send an event and let userspace tell us what to do
2176 	 */
2177 	if (sink) {
2178 		/*
2179 		 * TODO: check if we still need the S3 mode update workaround.
2180 		 * If yes, put it here.
2181 		 */
2182 		if (aconnector->dc_sink)
2183 			amdgpu_dm_update_freesync_caps(connector, NULL);
2184 
2185 		aconnector->dc_sink = sink;
2186 		dc_sink_retain(aconnector->dc_sink);
2187 		if (sink->dc_edid.length == 0) {
2188 			aconnector->edid = NULL;
2189 			if (aconnector->dc_link->aux_mode) {
2190 				drm_dp_cec_unset_edid(
2191 					&aconnector->dm_dp_aux.aux);
2192 			}
2193 		} else {
2194 			aconnector->edid =
2195 				(struct edid *)sink->dc_edid.raw_edid;
2196 
2197 			drm_connector_update_edid_property(connector,
2198 							   aconnector->edid);
2199 			drm_add_edid_modes(connector, aconnector->edid);
2200 
2201 			if (aconnector->dc_link->aux_mode)
2202 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2203 						    aconnector->edid);
2204 		}
2205 
2206 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2207 		update_connector_ext_caps(aconnector);
2208 	} else {
2209 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2210 		amdgpu_dm_update_freesync_caps(connector, NULL);
2211 		drm_connector_update_edid_property(connector, NULL);
2212 		aconnector->num_modes = 0;
2213 		dc_sink_release(aconnector->dc_sink);
2214 		aconnector->dc_sink = NULL;
2215 		aconnector->edid = NULL;
2216 #ifdef CONFIG_DRM_AMD_DC_HDCP
2217 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2218 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2219 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2220 #endif
2221 	}
2222 
2223 	mutex_unlock(&dev->mode_config.mutex);
2224 
2225 	if (sink)
2226 		dc_sink_release(sink);
2227 }
2228 
2229 static void handle_hpd_irq(void *param)
2230 {
2231 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2232 	struct drm_connector *connector = &aconnector->base;
2233 	struct drm_device *dev = connector->dev;
2234 	enum dc_connection_type new_connection_type = dc_connection_none;
2235 #ifdef CONFIG_DRM_AMD_DC_HDCP
2236 	struct amdgpu_device *adev = dev->dev_private;
2237 #endif
2238 
2239 	/*
2240 	 * In case of failure or MST no need to update connector status or notify the OS
2241 	 * since (for MST case) MST does this in its own context.
2242 	 */
2243 	mutex_lock(&aconnector->hpd_lock);
2244 
2245 #ifdef CONFIG_DRM_AMD_DC_HDCP
2246 	if (adev->dm.hdcp_workqueue)
2247 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2248 #endif
2249 	if (aconnector->fake_enable)
2250 		aconnector->fake_enable = false;
2251 
2252 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2253 		DRM_ERROR("KMS: Failed to detect connector\n");
2254 
2255 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
2260 		dm_restore_drm_connector_state(dev, connector);
2261 		drm_modeset_unlock_all(dev);
2262 
2263 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2264 			drm_kms_helper_hotplug_event(dev);
2265 
2266 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
2271 		dm_restore_drm_connector_state(dev, connector);
2272 		drm_modeset_unlock_all(dev);
2273 
2274 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2275 			drm_kms_helper_hotplug_event(dev);
2276 	}
	mutex_unlock(&aconnector->hpd_lock);
}
2280 
2281 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2282 {
2283 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2284 	uint8_t dret;
2285 	bool new_irq_handled = false;
2286 	int dpcd_addr;
2287 	int dpcd_bytes_to_read;
2288 
2289 	const int max_process_count = 30;
2290 	int process_count = 0;
2291 
2292 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2293 
2294 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2295 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2296 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2297 		dpcd_addr = DP_SINK_COUNT;
2298 	} else {
2299 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2300 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2301 		dpcd_addr = DP_SINK_COUNT_ESI;
2302 	}
2303 
2304 	dret = drm_dp_dpcd_read(
2305 		&aconnector->dm_dp_aux.aux,
2306 		dpcd_addr,
2307 		esi,
2308 		dpcd_bytes_to_read);
2309 
2310 	while (dret == dpcd_bytes_to_read &&
2311 		process_count < max_process_count) {
2312 		uint8_t retry;
2313 		dret = 0;
2314 
2315 		process_count++;
2316 
2317 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2318 		/* handle HPD short pulse irq */
2319 		if (aconnector->mst_mgr.mst_state)
2320 			drm_dp_mst_hpd_irq(
2321 				&aconnector->mst_mgr,
2322 				esi,
2323 				&new_irq_handled);
2324 
2325 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2327 			const int ack_dpcd_bytes_to_write =
2328 				dpcd_bytes_to_read - 1;
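			/*
			 * The ack is written starting at dpcd_addr + 1,
			 * skipping the read-only sink-count byte, hence one
			 * byte fewer than was read.
			 */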
2329 
2330 			for (retry = 0; retry < 3; retry++) {
2331 				uint8_t wret;
2332 
2333 				wret = drm_dp_dpcd_write(
2334 					&aconnector->dm_dp_aux.aux,
2335 					dpcd_addr + 1,
2336 					&esi[1],
2337 					ack_dpcd_bytes_to_write);
2338 				if (wret == ack_dpcd_bytes_to_write)
2339 					break;
2340 			}
2341 
2342 			/* check if there is new irq to be handled */
2343 			dret = drm_dp_dpcd_read(
2344 				&aconnector->dm_dp_aux.aux,
2345 				dpcd_addr,
2346 				esi,
2347 				dpcd_bytes_to_read);
2348 
2349 			new_irq_handled = false;
2350 		} else {
2351 			break;
2352 		}
2353 	}
2354 
2355 	if (process_count == max_process_count)
2356 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2357 }
2358 
2359 static void handle_hpd_rx_irq(void *param)
2360 {
2361 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2362 	struct drm_connector *connector = &aconnector->base;
2363 	struct drm_device *dev = connector->dev;
2364 	struct dc_link *dc_link = aconnector->dc_link;
2365 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2366 	enum dc_connection_type new_connection_type = dc_connection_none;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 	union hpd_irq_data hpd_irq_data;
2369 	struct amdgpu_device *adev = dev->dev_private;
2370 
2371 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2372 #endif
2373 
2374 	/*
2375 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2376 	 * conflict, after implement i2c helper, this mutex should be
2377 	 * retired.
2378 	 */
2379 	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
2384 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2385 #else
2386 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2387 #endif
2388 			!is_mst_root_connector) {
2389 		/* Downstream Port status changed. */
2390 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2391 			DRM_ERROR("KMS: Failed to detect connector\n");
2392 
2393 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2394 			emulated_link_detect(dc_link);
2395 
2396 			if (aconnector->fake_enable)
2397 				aconnector->fake_enable = false;
2398 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2403 			dm_restore_drm_connector_state(dev, connector);
2404 			drm_modeset_unlock_all(dev);
2405 
2406 			drm_kms_helper_hotplug_event(dev);
2407 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2408 
2409 			if (aconnector->fake_enable)
2410 				aconnector->fake_enable = false;
2411 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2416 			dm_restore_drm_connector_state(dev, connector);
2417 			drm_modeset_unlock_all(dev);
2418 
2419 			drm_kms_helper_hotplug_event(dev);
2420 		}
2421 	}
2422 #ifdef CONFIG_DRM_AMD_DC_HDCP
2423 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2424 		if (adev->dm.hdcp_workqueue)
2425 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2426 	}
2427 #endif
2428 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2429 	    (dc_link->type == dc_connection_mst_branch))
2430 		dm_handle_hpd_rx_irq(aconnector);
2431 
2432 	if (dc_link->type != dc_connection_mst_branch) {
2433 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2434 		mutex_unlock(&aconnector->hpd_lock);
2435 	}
2436 }
2437 
2438 static void register_hpd_handlers(struct amdgpu_device *adev)
2439 {
2440 	struct drm_device *dev = adev->ddev;
2441 	struct drm_connector *connector;
2442 	struct amdgpu_dm_connector *aconnector;
2443 	const struct dc_link *dc_link;
2444 	struct dc_interrupt_params int_params = {0};
2445 
2446 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2447 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2448 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2451 
2452 		aconnector = to_amdgpu_dm_connector(connector);
2453 		dc_link = aconnector->dc_link;
2454 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
2456 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2457 			int_params.irq_source = dc_link->irq_source_hpd;
2458 
2459 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2460 					handle_hpd_irq,
2461 					(void *) aconnector);
2462 		}
2463 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2469 
2470 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2471 					handle_hpd_rx_irq,
2472 					(void *) aconnector);
2473 		}
2474 	}
2475 }
2476 
2477 /* Register IRQ sources and initialize IRQ callbacks */
2478 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2479 {
2480 	struct dc *dc = adev->dm.dc;
2481 	struct common_irq_params *c_irq_params;
2482 	struct dc_interrupt_params int_params = {0};
2483 	int r;
2484 	int i;
2485 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2486 
2487 	if (adev->asic_type >= CHIP_VEGA10)
2488 		client_id = SOC15_IH_CLIENTID_DCE;
2489 
2490 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2492 
2493 	/*
2494 	 * Actions of amdgpu_irq_add_id():
2495 	 * 1. Register a set() function with base driver.
2496 	 *    Base driver will call set() function to enable/disable an
2497 	 *    interrupt in DC hardware.
2498 	 * 2. Register amdgpu_dm_irq_handler().
2499 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2500 	 *    coming from DC hardware.
2501 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2502 	 *    for acknowledging and handling. */
2503 
2504 	/* Use VBLANK interrupt */
2505 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2506 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2507 		if (r) {
2508 			DRM_ERROR("Failed to add crtc irq id!\n");
2509 			return r;
2510 		}
2511 
2512 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2513 		int_params.irq_source =
2514 			dc_interrupt_to_irq_source(dc, i, 0);
2515 
2516 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2517 
2518 		c_irq_params->adev = adev;
2519 		c_irq_params->irq_src = int_params.irq_source;
2520 
2521 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2522 				dm_crtc_high_irq, c_irq_params);
2523 	}
2524 
2525 	/* Use VUPDATE interrupt */
2526 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2527 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2528 		if (r) {
2529 			DRM_ERROR("Failed to add vupdate irq id!\n");
2530 			return r;
2531 		}
2532 
2533 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2534 		int_params.irq_source =
2535 			dc_interrupt_to_irq_source(dc, i, 0);
2536 
2537 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2538 
2539 		c_irq_params->adev = adev;
2540 		c_irq_params->irq_src = int_params.irq_source;
2541 
2542 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2543 				dm_vupdate_high_irq, c_irq_params);
2544 	}
2545 
2546 	/* Use GRPH_PFLIP interrupt */
2547 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2548 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2549 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2550 		if (r) {
2551 			DRM_ERROR("Failed to add page flip irq id!\n");
2552 			return r;
2553 		}
2554 
2555 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2556 		int_params.irq_source =
2557 			dc_interrupt_to_irq_source(dc, i, 0);
2558 
2559 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2560 
2561 		c_irq_params->adev = adev;
2562 		c_irq_params->irq_src = int_params.irq_source;
2563 
		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2568 
2569 	/* HPD */
2570 	r = amdgpu_irq_add_id(adev, client_id,
2571 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2572 	if (r) {
2573 		DRM_ERROR("Failed to add hpd irq id!\n");
2574 		return r;
2575 	}
2576 
2577 	register_hpd_handlers(adev);
2578 
2579 	return 0;
2580 }
2581 
2582 #if defined(CONFIG_DRM_AMD_DC_DCN)
2583 /* Register IRQ sources and initialize IRQ callbacks */
2584 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2585 {
2586 	struct dc *dc = adev->dm.dc;
2587 	struct common_irq_params *c_irq_params;
2588 	struct dc_interrupt_params int_params = {0};
2589 	int r;
2590 	int i;
2591 
2592 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2594 
2595 	/*
2596 	 * Actions of amdgpu_irq_add_id():
2597 	 * 1. Register a set() function with base driver.
2598 	 *    Base driver will call set() function to enable/disable an
2599 	 *    interrupt in DC hardware.
2600 	 * 2. Register amdgpu_dm_irq_handler().
2601 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2602 	 *    coming from DC hardware.
2603 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2604 	 *    for acknowledging and handling.
2605 	 */
2606 
2607 	/* Use VSTARTUP interrupt */
2608 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2609 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2610 			i++) {
2611 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2612 
2613 		if (r) {
2614 			DRM_ERROR("Failed to add crtc irq id!\n");
2615 			return r;
2616 		}
2617 
2618 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2619 		int_params.irq_source =
2620 			dc_interrupt_to_irq_source(dc, i, 0);
2621 
2622 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2623 
2624 		c_irq_params->adev = adev;
2625 		c_irq_params->irq_src = int_params.irq_source;
2626 
2627 		amdgpu_dm_irq_register_interrupt(
2628 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2629 	}
2630 
2631 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2632 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2633 	 * to trigger at end of each vblank, regardless of state of the lock,
2634 	 * matching DCE behaviour.
2635 	 */
2636 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2637 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2638 	     i++) {
2639 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2640 
2641 		if (r) {
2642 			DRM_ERROR("Failed to add vupdate irq id!\n");
2643 			return r;
2644 		}
2645 
2646 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2647 		int_params.irq_source =
2648 			dc_interrupt_to_irq_source(dc, i, 0);
2649 
2650 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2651 
2652 		c_irq_params->adev = adev;
2653 		c_irq_params->irq_src = int_params.irq_source;
2654 
2655 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2656 				dm_vupdate_high_irq, c_irq_params);
2657 	}
2658 
2659 	/* Use GRPH_PFLIP interrupt */
2660 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2661 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2662 			i++) {
2663 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2664 		if (r) {
2665 			DRM_ERROR("Failed to add page flip irq id!\n");
2666 			return r;
2667 		}
2668 
2669 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2670 		int_params.irq_source =
2671 			dc_interrupt_to_irq_source(dc, i, 0);
2672 
2673 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2674 
2675 		c_irq_params->adev = adev;
2676 		c_irq_params->irq_src = int_params.irq_source;
2677 
		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
2682 
2683 	/* HPD */
2684 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2685 			&adev->hpd_irq);
2686 	if (r) {
2687 		DRM_ERROR("Failed to add hpd irq id!\n");
2688 		return r;
2689 	}
2690 
2691 	register_hpd_handlers(adev);
2692 
2693 	return 0;
2694 }
2695 #endif
2696 
2697 /*
2698  * Acquires the lock for the atomic state object and returns
2699  * the new atomic state.
2700  *
2701  * This should only be called during atomic check.
2702  */
2703 static int dm_atomic_get_state(struct drm_atomic_state *state,
2704 			       struct dm_atomic_state **dm_state)
2705 {
2706 	struct drm_device *dev = state->dev;
2707 	struct amdgpu_device *adev = dev->dev_private;
2708 	struct amdgpu_display_manager *dm = &adev->dm;
2709 	struct drm_private_state *priv_state;
2710 
2711 	if (*dm_state)
2712 		return 0;
2713 
2714 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2715 	if (IS_ERR(priv_state))
2716 		return PTR_ERR(priv_state);
2717 
2718 	*dm_state = to_dm_atomic_state(priv_state);
2719 
2720 	return 0;
2721 }
2722 
2723 static struct dm_atomic_state *
2724 dm_atomic_get_new_state(struct drm_atomic_state *state)
2725 {
2726 	struct drm_device *dev = state->dev;
2727 	struct amdgpu_device *adev = dev->dev_private;
2728 	struct amdgpu_display_manager *dm = &adev->dm;
2729 	struct drm_private_obj *obj;
2730 	struct drm_private_state *new_obj_state;
2731 	int i;
2732 
2733 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2734 		if (obj->funcs == dm->atomic_obj.funcs)
2735 			return to_dm_atomic_state(new_obj_state);
2736 	}
2737 
2738 	return NULL;
2739 }
2740 
2741 static struct dm_atomic_state *
2742 dm_atomic_get_old_state(struct drm_atomic_state *state)
2743 {
2744 	struct drm_device *dev = state->dev;
2745 	struct amdgpu_device *adev = dev->dev_private;
2746 	struct amdgpu_display_manager *dm = &adev->dm;
2747 	struct drm_private_obj *obj;
2748 	struct drm_private_state *old_obj_state;
2749 	int i;
2750 
2751 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2752 		if (obj->funcs == dm->atomic_obj.funcs)
2753 			return to_dm_atomic_state(old_obj_state);
2754 	}
2755 
2756 	return NULL;
2757 }
2758 
2759 static struct drm_private_state *
2760 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2761 {
2762 	struct dm_atomic_state *old_state, *new_state;
2763 
2764 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2765 	if (!new_state)
2766 		return NULL;
2767 
2768 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2769 
2770 	old_state = to_dm_atomic_state(obj->state);
2771 
2772 	if (old_state && old_state->context)
2773 		new_state->context = dc_copy_state(old_state->context);
2774 
2775 	if (!new_state->context) {
2776 		kfree(new_state);
2777 		return NULL;
2778 	}
2779 
2780 	return &new_state->base;
2781 }
2782 
2783 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2784 				    struct drm_private_state *state)
2785 {
2786 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2787 
2788 	if (dm_state && dm_state->context)
2789 		dc_release_state(dm_state->context);
2790 
2791 	kfree(dm_state);
2792 }
2793 
2794 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2795 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2796 	.atomic_destroy_state = dm_atomic_destroy_state,
2797 };
2798 
2799 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2800 {
2801 	struct dm_atomic_state *state;
2802 	int r;
2803 
2804 	adev->mode_info.mode_config_initialized = true;
2805 
2806 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2807 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2808 
2809 	adev->ddev->mode_config.max_width = 16384;
2810 	adev->ddev->mode_config.max_height = 16384;
2811 
2812 	adev->ddev->mode_config.preferred_depth = 24;
2813 	adev->ddev->mode_config.prefer_shadow = 1;
2814 	/* indicates support for immediate flip */
2815 	adev->ddev->mode_config.async_page_flip = true;
2816 
2817 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2818 
2819 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2820 	if (!state)
2821 		return -ENOMEM;
2822 
2823 	state->context = dc_create_state(adev->dm.dc);
2824 	if (!state->context) {
2825 		kfree(state);
2826 		return -ENOMEM;
2827 	}
2828 
2829 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2830 
2831 	drm_atomic_private_obj_init(adev->ddev,
2832 				    &adev->dm.atomic_obj,
2833 				    &state->base,
2834 				    &dm_atomic_state_funcs);
2835 
2836 	r = amdgpu_display_modeset_create_props(adev);
2837 	if (r)
2838 		return r;
2839 
2840 	r = amdgpu_dm_audio_init(adev);
2841 	if (r)
2842 		return r;
2843 
2844 	return 0;
2845 }
2846 
2847 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2848 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2849 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2850 
2851 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2852 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2853 
2854 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2855 {
2856 #if defined(CONFIG_ACPI)
2857 	struct amdgpu_dm_backlight_caps caps;
2858 
2859 	if (dm->backlight_caps.caps_valid)
2860 		return;
2861 
2862 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2863 	if (caps.caps_valid) {
2864 		dm->backlight_caps.caps_valid = true;
2865 		if (caps.aux_support)
2866 			return;
2867 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2868 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2869 	} else {
2870 		dm->backlight_caps.min_input_signal =
2871 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2872 		dm->backlight_caps.max_input_signal =
2873 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2874 	}
2875 #else
2876 	if (dm->backlight_caps.aux_support)
2877 		return;
2878 
2879 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2880 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2881 #endif
2882 }
2883 
2884 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2885 {
2886 	bool rc;
2887 
2888 	if (!link)
2889 		return 1;
2890 
2891 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2892 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2893 
2894 	return rc ? 0 : 1;
2895 }
2896 
2897 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2898 			      const uint32_t user_brightness)
2899 {
2900 	u32 min, max, conversion_pace;
2901 	u32 brightness = user_brightness;
2902 
2903 	if (!caps)
2904 		goto out;
2905 
2906 	if (!caps->aux_support) {
2907 		max = caps->max_input_signal;
2908 		min = caps->min_input_signal;
2909 		/*
2910 		 * The brightness input is in the range 0-255
2911 		 * It needs to be rescaled to be between the
2912 		 * requested min and max input signal
2913 		 * It also needs to be scaled up by 0x101 to
2914 		 * match the DC interface which has a range of
2915 		 * 0 to 0xffff
2916 		 */
2917 		conversion_pace = 0x101;
2918 		brightness =
2919 			user_brightness
2920 			* conversion_pace
2921 			* (max - min)
2922 			/ AMDGPU_MAX_BL_LEVEL
2923 			+ min * conversion_pace;
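		/*
		 * For example, user_brightness = 255 with min = 12 and
		 * max = 255 yields 255 * 0x101 * 243 / 255 + 12 * 0x101
		 * = 65535, i.e. full scale on DC's 0-0xffff range.
		 */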
2924 	} else {
2925 		/* TODO
2926 		 * We are doing a linear interpolation here, which is OK but
2927 		 * does not provide the optimal result. We probably want
2928 		 * something close to the Perceptual Quantizer (PQ) curve.
2929 		 */
2930 		max = caps->aux_max_input_signal;
2931 		min = caps->aux_min_input_signal;
2932 
2933 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2934 			       + user_brightness * max;
		// Multiply the value by 1000 since we use millinits
2936 		brightness *= 1000;
2937 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
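		/*
		 * For example, user_brightness = AMDGPU_MAX_BL_LEVEL maps to
		 * aux_max_input_signal * 1000, i.e. the panel's maximum
		 * luminance expressed in millinits.
		 */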
2938 	}
2939 
2940 out:
2941 	return brightness;
2942 }
2943 
2944 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2945 {
2946 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2947 	struct amdgpu_dm_backlight_caps caps;
2948 	struct dc_link *link = NULL;
2949 	u32 brightness;
2950 	bool rc;
2951 
2952 	amdgpu_dm_update_backlight_caps(dm);
2953 	caps = dm->backlight_caps;
2954 
2955 	link = (struct dc_link *)dm->backlight_link;
2956 
2957 	brightness = convert_brightness(&caps, bd->props.brightness);
2958 	// Change brightness based on AUX property
2959 	if (caps.aux_support)
2960 		return set_backlight_via_aux(link, brightness);
2961 
2962 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2963 
2964 	return rc ? 0 : 1;
2965 }
2966 
2967 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2968 {
2969 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2970 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2971 
2972 	if (ret == DC_ERROR_UNEXPECTED)
2973 		return bd->props.brightness;
2974 	return ret;
2975 }
2976 
2977 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2978 	.options = BL_CORE_SUSPENDRESUME,
2979 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2980 	.update_status	= amdgpu_dm_backlight_update_status,
2981 };
2982 
2983 static void
2984 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2985 {
2986 	char bl_name[16];
2987 	struct backlight_properties props = { 0 };
2988 
2989 	amdgpu_dm_update_backlight_caps(dm);
2990 
2991 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2992 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2993 	props.type = BACKLIGHT_RAW;
2994 
2995 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2996 			dm->adev->ddev->primary->index);
2997 
2998 	dm->backlight_dev = backlight_device_register(bl_name,
2999 			dm->adev->ddev->dev,
3000 			dm,
3001 			&amdgpu_dm_backlight_ops,
3002 			&props);
3003 
3004 	if (IS_ERR(dm->backlight_dev))
3005 		DRM_ERROR("DM: Backlight registration failed!\n");
3006 	else
3007 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3008 }
3009 
3010 #endif
3011 
3012 static int initialize_plane(struct amdgpu_display_manager *dm,
3013 			    struct amdgpu_mode_info *mode_info, int plane_id,
3014 			    enum drm_plane_type plane_type,
3015 			    const struct dc_plane_cap *plane_cap)
3016 {
3017 	struct drm_plane *plane;
3018 	unsigned long possible_crtcs;
3019 	int ret = 0;
3020 
3021 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3022 	if (!plane) {
3023 		DRM_ERROR("KMS: Failed to allocate plane\n");
3024 		return -ENOMEM;
3025 	}
3026 	plane->type = plane_type;
3027 
3028 	/*
3029 	 * HACK: IGT tests expect that the primary plane for a CRTC
3030 	 * can only have one possible CRTC. Only expose support for
3031 	 * any CRTC if they're not going to be used as a primary plane
3032 	 * for a CRTC - like overlay or underlay planes.
3033 	 */
3034 	possible_crtcs = 1 << plane_id;
3035 	if (plane_id >= dm->dc->caps.max_streams)
3036 		possible_crtcs = 0xff;
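	/* 0xff is a bitmask covering any of up to 8 possible CRTCs. */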
3037 
3038 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3039 
3040 	if (ret) {
3041 		DRM_ERROR("KMS: Failed to initialize plane\n");
3042 		kfree(plane);
3043 		return ret;
3044 	}
3045 
3046 	if (mode_info)
3047 		mode_info->planes[plane_id] = plane;
3048 
3049 	return ret;
3050 }
3051 
3052 
3053 static void register_backlight_device(struct amdgpu_display_manager *dm,
3054 				      struct dc_link *link)
3055 {
3056 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3057 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3058 
3059 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3060 	    link->type != dc_connection_none) {
3061 		/*
3062 		 * Event if registration failed, we should continue with
3063 		 * DM initialization because not having a backlight control
3064 		 * is better then a black screen.
3065 		 */
3066 		amdgpu_dm_register_backlight_device(dm);
3067 
3068 		if (dm->backlight_dev)
3069 			dm->backlight_link = link;
3070 	}
3071 #endif
3072 }
3073 
3074 
3075 /*
3076  * In this architecture, the association
3077  * connector -> encoder -> crtc
3078  * id not really requried. The crtc and connector will hold the
3079  * display_index as an abstraction to use with DAL component
3080  *
3081  * Returns 0 on success
3082  */
3083 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3084 {
3085 	struct amdgpu_display_manager *dm = &adev->dm;
3086 	int32_t i;
3087 	struct amdgpu_dm_connector *aconnector = NULL;
3088 	struct amdgpu_encoder *aencoder = NULL;
3089 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3090 	uint32_t link_cnt;
3091 	int32_t primary_planes;
3092 	enum dc_connection_type new_connection_type = dc_connection_none;
3093 	const struct dc_plane_cap *plane;
3094 
3095 	link_cnt = dm->dc->caps.max_links;
3096 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3097 		DRM_ERROR("DM: Failed to initialize mode config\n");
3098 		return -EINVAL;
3099 	}
3100 
3101 	/* There is one primary plane per CRTC */
3102 	primary_planes = dm->dc->caps.max_streams;
3103 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3104 
3105 	/*
3106 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3107 	 * Order is reversed to match iteration order in atomic check.
3108 	 */
3109 	for (i = (primary_planes - 1); i >= 0; i--) {
3110 		plane = &dm->dc->caps.planes[i];
3111 
3112 		if (initialize_plane(dm, mode_info, i,
3113 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3114 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3115 			goto fail;
3116 		}
3117 	}
3118 
3119 	/*
3120 	 * Initialize overlay planes, index starting after primary planes.
3121 	 * These planes have a higher DRM index than the primary planes since
3122 	 * they should be considered as having a higher z-order.
3123 	 * Order is reversed to match iteration order in atomic check.
3124 	 *
3125 	 * Only support DCN for now, and only expose one so we don't encourage
3126 	 * userspace to use up all the pipes.
3127 	 */
3128 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3129 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3130 
3131 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3132 			continue;
3133 
3134 		if (!plane->blends_with_above || !plane->blends_with_below)
3135 			continue;
3136 
3137 		if (!plane->pixel_format_support.argb8888)
3138 			continue;
3139 
3140 		if (initialize_plane(dm, NULL, primary_planes + i,
3141 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3142 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3143 			goto fail;
3144 		}
3145 
3146 		/* Only create one overlay plane. */
3147 		break;
3148 	}
3149 
3150 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3151 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3152 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3153 			goto fail;
3154 		}
3155 
3156 	dm->display_indexes_num = dm->dc->caps.max_streams;
3157 
3158 	/* loops over all connectors on the board */
3159 	for (i = 0; i < link_cnt; i++) {
3160 		struct dc_link *link = NULL;
3161 
3162 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3163 			DRM_ERROR(
3164 				"KMS: Cannot support more than %d display indexes\n",
3165 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3166 			continue;
3167 		}
3168 
3169 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3170 		if (!aconnector)
3171 			goto fail;
3172 
3173 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3174 		if (!aencoder)
3175 			goto fail;
3176 
3177 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3178 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3179 			goto fail;
3180 		}
3181 
3182 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3183 			DRM_ERROR("KMS: Failed to initialize connector\n");
3184 			goto fail;
3185 		}
3186 
3187 		link = dc_get_link_at_index(dm->dc, i);
3188 
3189 		if (!dc_link_detect_sink(link, &new_connection_type))
3190 			DRM_ERROR("KMS: Failed to detect connector\n");
3191 
3192 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3193 			emulated_link_detect(link);
3194 			amdgpu_dm_update_connector_after_detect(aconnector);
3195 
3196 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3197 			amdgpu_dm_update_connector_after_detect(aconnector);
3198 			register_backlight_device(dm, link);
3199 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3200 				amdgpu_dm_set_psr_caps(link);
3201 		}
3202 
3203 
3204 	}
3205 
3206 	/* Software is initialized. Now we can register interrupt handlers. */
3207 	switch (adev->asic_type) {
3208 	case CHIP_BONAIRE:
3209 	case CHIP_HAWAII:
3210 	case CHIP_KAVERI:
3211 	case CHIP_KABINI:
3212 	case CHIP_MULLINS:
3213 	case CHIP_TONGA:
3214 	case CHIP_FIJI:
3215 	case CHIP_CARRIZO:
3216 	case CHIP_STONEY:
3217 	case CHIP_POLARIS11:
3218 	case CHIP_POLARIS10:
3219 	case CHIP_POLARIS12:
3220 	case CHIP_VEGAM:
3221 	case CHIP_VEGA10:
3222 	case CHIP_VEGA12:
3223 	case CHIP_VEGA20:
3224 		if (dce110_register_irq_handlers(dm->adev)) {
3225 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3226 			goto fail;
3227 		}
3228 		break;
3229 #if defined(CONFIG_DRM_AMD_DC_DCN)
3230 	case CHIP_RAVEN:
3231 	case CHIP_NAVI12:
3232 	case CHIP_NAVI10:
3233 	case CHIP_NAVI14:
3234 	case CHIP_RENOIR:
3235 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3236 	case CHIP_SIENNA_CICHLID:
3237 	case CHIP_NAVY_FLOUNDER:
3238 #endif
3239 		if (dcn10_register_irq_handlers(dm->adev)) {
3240 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3241 			goto fail;
3242 		}
3243 		break;
3244 #endif
3245 	default:
3246 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3247 		goto fail;
3248 	}
3249 
3250 	/* No userspace support. */
3251 	dm->dc->debug.disable_tri_buf = true;
3252 
3253 	return 0;
3254 fail:
3255 	kfree(aencoder);
3256 	kfree(aconnector);
3257 
3258 	return -EINVAL;
3259 }
3260 
3261 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3262 {
3263 	drm_mode_config_cleanup(dm->ddev);
3264 	drm_atomic_private_obj_fini(&dm->atomic_obj);
}
3267 
3268 /******************************************************************************
3269  * amdgpu_display_funcs functions
3270  *****************************************************************************/
3271 
3272 /*
3273  * dm_bandwidth_update - program display watermarks
3274  *
3275  * @adev: amdgpu_device pointer
3276  *
3277  * Calculate and program the display watermarks and line buffer allocation.
3278  */
3279 static void dm_bandwidth_update(struct amdgpu_device *adev)
3280 {
3281 	/* TODO: implement later */
3282 }
3283 
3284 static const struct amdgpu_display_funcs dm_display_funcs = {
3285 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3286 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3287 	.backlight_set_level = NULL, /* never called for DC */
3288 	.backlight_get_level = NULL, /* never called for DC */
3289 	.hpd_sense = NULL,/* called unconditionally */
3290 	.hpd_set_polarity = NULL, /* called unconditionally */
3291 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3292 	.page_flip_get_scanoutpos =
3293 		dm_crtc_get_scanoutpos,/* called unconditionally */
3294 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3295 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3296 };
3297 
3298 #if defined(CONFIG_DEBUG_KERNEL_DC)
3299 
3300 static ssize_t s3_debug_store(struct device *device,
3301 			      struct device_attribute *attr,
3302 			      const char *buf,
3303 			      size_t count)
3304 {
3305 	int ret;
3306 	int s3_state;
3307 	struct drm_device *drm_dev = dev_get_drvdata(device);
3308 	struct amdgpu_device *adev = drm_dev->dev_private;
3309 
3310 	ret = kstrtoint(buf, 0, &s3_state);
3311 
	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else {
			dm_suspend(adev);
		}
	}
3319 
3320 	return ret == 0 ? count : 0;
3321 }
3322 
3323 DEVICE_ATTR_WO(s3_debug);
3324 
3325 #endif
3326 
3327 static int dm_early_init(void *handle)
3328 {
3329 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3330 
3331 	switch (adev->asic_type) {
3332 	case CHIP_BONAIRE:
3333 	case CHIP_HAWAII:
3334 		adev->mode_info.num_crtc = 6;
3335 		adev->mode_info.num_hpd = 6;
3336 		adev->mode_info.num_dig = 6;
3337 		break;
3338 	case CHIP_KAVERI:
3339 		adev->mode_info.num_crtc = 4;
3340 		adev->mode_info.num_hpd = 6;
3341 		adev->mode_info.num_dig = 7;
3342 		break;
3343 	case CHIP_KABINI:
3344 	case CHIP_MULLINS:
3345 		adev->mode_info.num_crtc = 2;
3346 		adev->mode_info.num_hpd = 6;
3347 		adev->mode_info.num_dig = 6;
3348 		break;
3349 	case CHIP_FIJI:
3350 	case CHIP_TONGA:
3351 		adev->mode_info.num_crtc = 6;
3352 		adev->mode_info.num_hpd = 6;
3353 		adev->mode_info.num_dig = 7;
3354 		break;
3355 	case CHIP_CARRIZO:
3356 		adev->mode_info.num_crtc = 3;
3357 		adev->mode_info.num_hpd = 6;
3358 		adev->mode_info.num_dig = 9;
3359 		break;
3360 	case CHIP_STONEY:
3361 		adev->mode_info.num_crtc = 2;
3362 		adev->mode_info.num_hpd = 6;
3363 		adev->mode_info.num_dig = 9;
3364 		break;
3365 	case CHIP_POLARIS11:
3366 	case CHIP_POLARIS12:
3367 		adev->mode_info.num_crtc = 5;
3368 		adev->mode_info.num_hpd = 5;
3369 		adev->mode_info.num_dig = 5;
3370 		break;
3371 	case CHIP_POLARIS10:
3372 	case CHIP_VEGAM:
3373 		adev->mode_info.num_crtc = 6;
3374 		adev->mode_info.num_hpd = 6;
3375 		adev->mode_info.num_dig = 6;
3376 		break;
3377 	case CHIP_VEGA10:
3378 	case CHIP_VEGA12:
3379 	case CHIP_VEGA20:
3380 		adev->mode_info.num_crtc = 6;
3381 		adev->mode_info.num_hpd = 6;
3382 		adev->mode_info.num_dig = 6;
3383 		break;
3384 #if defined(CONFIG_DRM_AMD_DC_DCN)
3385 	case CHIP_RAVEN:
3386 		adev->mode_info.num_crtc = 4;
3387 		adev->mode_info.num_hpd = 4;
3388 		adev->mode_info.num_dig = 4;
3389 		break;
3390 #endif
3391 	case CHIP_NAVI10:
3392 	case CHIP_NAVI12:
3393 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3394 	case CHIP_SIENNA_CICHLID:
3395 	case CHIP_NAVY_FLOUNDER:
3396 #endif
3397 		adev->mode_info.num_crtc = 6;
3398 		adev->mode_info.num_hpd = 6;
3399 		adev->mode_info.num_dig = 6;
3400 		break;
3401 	case CHIP_NAVI14:
3402 		adev->mode_info.num_crtc = 5;
3403 		adev->mode_info.num_hpd = 5;
3404 		adev->mode_info.num_dig = 5;
3405 		break;
3406 	case CHIP_RENOIR:
3407 		adev->mode_info.num_crtc = 4;
3408 		adev->mode_info.num_hpd = 4;
3409 		adev->mode_info.num_dig = 4;
3410 		break;
3411 	default:
3412 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3413 		return -EINVAL;
3414 	}
3415 
3416 	amdgpu_dm_set_irq_funcs(adev);
3417 
3418 	if (adev->mode_info.funcs == NULL)
3419 		adev->mode_info.funcs = &dm_display_funcs;
3420 
3421 	/*
3422 	 * Note: Do NOT change adev->audio_endpt_rreg and
3423 	 * adev->audio_endpt_wreg because they are initialised in
3424 	 * amdgpu_device_init()
3425 	 */
3426 #if defined(CONFIG_DEBUG_KERNEL_DC)
3427 	device_create_file(
3428 		adev->ddev->dev,
3429 		&dev_attr_s3_debug);
3430 #endif
3431 
3432 	return 0;
3433 }
3434 
3435 static bool modeset_required(struct drm_crtc_state *crtc_state,
3436 			     struct dc_stream_state *new_stream,
3437 			     struct dc_stream_state *old_stream)
3438 {
3439 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3440 		return false;
3441 
3442 	if (!crtc_state->enable)
3443 		return false;
3444 
3445 	return crtc_state->active;
3446 }
3447 
3448 static bool modereset_required(struct drm_crtc_state *crtc_state)
3449 {
3450 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3451 		return false;
3452 
3453 	return !crtc_state->enable || !crtc_state->active;
3454 }
3455 
3456 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3457 {
3458 	drm_encoder_cleanup(encoder);
3459 	kfree(encoder);
3460 }
3461 
3462 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3463 	.destroy = amdgpu_dm_encoder_destroy,
3464 };
3465 
3466 
3467 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3468 				struct dc_scaling_info *scaling_info)
3469 {
3470 	int scale_w, scale_h;
3471 
3472 	memset(scaling_info, 0, sizeof(*scaling_info));
3473 
	/* Source is in fixed-point 16.16, but we ignore the fractional part for now... */
3475 	scaling_info->src_rect.x = state->src_x >> 16;
3476 	scaling_info->src_rect.y = state->src_y >> 16;
3477 
3478 	scaling_info->src_rect.width = state->src_w >> 16;
3479 	if (scaling_info->src_rect.width == 0)
3480 		return -EINVAL;
3481 
3482 	scaling_info->src_rect.height = state->src_h >> 16;
3483 	if (scaling_info->src_rect.height == 0)
3484 		return -EINVAL;
3485 
3486 	scaling_info->dst_rect.x = state->crtc_x;
3487 	scaling_info->dst_rect.y = state->crtc_y;
3488 
3489 	if (state->crtc_w == 0)
3490 		return -EINVAL;
3491 
3492 	scaling_info->dst_rect.width = state->crtc_w;
3493 
3494 	if (state->crtc_h == 0)
3495 		return -EINVAL;
3496 
3497 	scaling_info->dst_rect.height = state->crtc_h;
3498 
3499 	/* DRM doesn't specify clipping on destination output. */
3500 	scaling_info->clip_rect = scaling_info->dst_rect;
3501 
3502 	/* TODO: Validate scaling per-format with DC plane caps */
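	/* The 250-16000 bounds below correspond to a 0.25x-16x scaling range. */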
3503 	scale_w = scaling_info->dst_rect.width * 1000 /
3504 		  scaling_info->src_rect.width;
3505 
3506 	if (scale_w < 250 || scale_w > 16000)
3507 		return -EINVAL;
3508 
3509 	scale_h = scaling_info->dst_rect.height * 1000 /
3510 		  scaling_info->src_rect.height;
3511 
3512 	if (scale_h < 250 || scale_h > 16000)
3513 		return -EINVAL;
3514 
3515 	/*
3516 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3517 	 * assume reasonable defaults based on the format.
3518 	 */
3519 
3520 	return 0;
3521 }
3522 
3523 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3524 		       uint64_t *tiling_flags, bool *tmz_surface)
3525 {
3526 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3527 	int r = amdgpu_bo_reserve(rbo, false);
3528 
3529 	if (unlikely(r)) {
3530 		/* Don't show error message when returning -ERESTARTSYS */
3531 		if (r != -ERESTARTSYS)
3532 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3533 		return r;
3534 	}
3535 
3536 	if (tiling_flags)
3537 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3538 
3539 	if (tmz_surface)
3540 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3541 
3542 	amdgpu_bo_unreserve(rbo);
3543 
3544 	return r;
3545 }
3546 
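/*
 * The DCC metadata offset is stored in the tiling flags in units of 256
 * bytes, so e.g. DCC_OFFSET_256B = 4 places the metadata at
 * address + 1024. A zero offset means the surface has no DCC metadata.
 */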
3547 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3548 {
3549 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3550 
3551 	return offset ? (address + offset * 256) : 0;
3552 }
3553 
3554 static int
3555 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3556 			  const struct amdgpu_framebuffer *afb,
3557 			  const enum surface_pixel_format format,
3558 			  const enum dc_rotation_angle rotation,
3559 			  const struct plane_size *plane_size,
3560 			  const union dc_tiling_info *tiling_info,
3561 			  const uint64_t info,
3562 			  struct dc_plane_dcc_param *dcc,
3563 			  struct dc_plane_address *address,
3564 			  bool force_disable_dcc)
3565 {
3566 	struct dc *dc = adev->dm.dc;
3567 	struct dc_dcc_surface_param input;
3568 	struct dc_surface_dcc_cap output;
3569 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3570 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3571 	uint64_t dcc_address;
3572 
3573 	memset(&input, 0, sizeof(input));
3574 	memset(&output, 0, sizeof(output));
3575 
3576 	if (force_disable_dcc)
3577 		return 0;
3578 
3579 	if (!offset)
3580 		return 0;
3581 
3582 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3583 		return 0;
3584 
3585 	if (!dc->cap_funcs.get_dcc_compression_cap)
3586 		return -EINVAL;
3587 
3588 	input.format = format;
3589 	input.surface_size.width = plane_size->surface_size.width;
3590 	input.surface_size.height = plane_size->surface_size.height;
3591 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3592 
3593 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3594 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3595 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3596 		input.scan = SCAN_DIRECTION_VERTICAL;
3597 
3598 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3599 		return -EINVAL;
3600 
3601 	if (!output.capable)
3602 		return -EINVAL;
3603 
3604 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3605 		return -EINVAL;
3606 
3607 	dcc->enable = 1;
3608 	dcc->meta_pitch =
3609 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3610 	dcc->independent_64b_blks = i64b;
3611 
3612 	dcc_address = get_dcc_address(afb->address, info);
3613 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3614 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3615 
3616 	return 0;
3617 }
3618 
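/*
 * Fill tiling, size, DCC and address information for a plane from its
 * framebuffer. RGB surfaces program a single graphics address; semi-planar
 * video surfaces also program a chroma plane whose address comes from
 * fb->offsets[1] and whose dimensions are assumed subsampled by two in
 * both axes (4:2:0) until per-format handling is added.
 */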
3619 static int
3620 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3621 			     const struct amdgpu_framebuffer *afb,
3622 			     const enum surface_pixel_format format,
3623 			     const enum dc_rotation_angle rotation,
3624 			     const uint64_t tiling_flags,
3625 			     union dc_tiling_info *tiling_info,
3626 			     struct plane_size *plane_size,
3627 			     struct dc_plane_dcc_param *dcc,
3628 			     struct dc_plane_address *address,
3629 			     bool tmz_surface,
3630 			     bool force_disable_dcc)
3631 {
3632 	const struct drm_framebuffer *fb = &afb->base;
3633 	int ret;
3634 
3635 	memset(tiling_info, 0, sizeof(*tiling_info));
3636 	memset(plane_size, 0, sizeof(*plane_size));
3637 	memset(dcc, 0, sizeof(*dcc));
3638 	memset(address, 0, sizeof(*address));
3639 
3640 	address->tmz_surface = tmz_surface;
3641 
3642 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3643 		plane_size->surface_size.x = 0;
3644 		plane_size->surface_size.y = 0;
3645 		plane_size->surface_size.width = fb->width;
3646 		plane_size->surface_size.height = fb->height;
3647 		plane_size->surface_pitch =
3648 			fb->pitches[0] / fb->format->cpp[0];
3649 
3650 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3651 		address->grph.addr.low_part = lower_32_bits(afb->address);
3652 		address->grph.addr.high_part = upper_32_bits(afb->address);
3653 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3654 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3655 
3656 		plane_size->surface_size.x = 0;
3657 		plane_size->surface_size.y = 0;
3658 		plane_size->surface_size.width = fb->width;
3659 		plane_size->surface_size.height = fb->height;
3660 		plane_size->surface_pitch =
3661 			fb->pitches[0] / fb->format->cpp[0];
3662 
3663 		plane_size->chroma_size.x = 0;
3664 		plane_size->chroma_size.y = 0;
3665 		/* TODO: set these based on surface format */
3666 		plane_size->chroma_size.width = fb->width / 2;
3667 		plane_size->chroma_size.height = fb->height / 2;
3668 
3669 		plane_size->chroma_pitch =
3670 			fb->pitches[1] / fb->format->cpp[1];
3671 
3672 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3673 		address->video_progressive.luma_addr.low_part =
3674 			lower_32_bits(afb->address);
3675 		address->video_progressive.luma_addr.high_part =
3676 			upper_32_bits(afb->address);
3677 		address->video_progressive.chroma_addr.low_part =
3678 			lower_32_bits(chroma_addr);
3679 		address->video_progressive.chroma_addr.high_part =
3680 			upper_32_bits(chroma_addr);
3681 	}
3682 
3683 	/* Fill GFX8 params */
3684 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3685 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3686 
3687 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3688 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3689 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3690 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3691 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3692 
3693 		/* XXX fix me for VI */
3694 		tiling_info->gfx8.num_banks = num_banks;
3695 		tiling_info->gfx8.array_mode =
3696 				DC_ARRAY_2D_TILED_THIN1;
3697 		tiling_info->gfx8.tile_split = tile_split;
3698 		tiling_info->gfx8.bank_width = bankw;
3699 		tiling_info->gfx8.bank_height = bankh;
3700 		tiling_info->gfx8.tile_aspect = mtaspect;
3701 		tiling_info->gfx8.tile_mode =
3702 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3703 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3704 			== DC_ARRAY_1D_TILED_THIN1) {
3705 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3706 	}
3707 
3708 	tiling_info->gfx8.pipe_config =
3709 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3710 
3711 	if (adev->asic_type == CHIP_VEGA10 ||
3712 	    adev->asic_type == CHIP_VEGA12 ||
3713 	    adev->asic_type == CHIP_VEGA20 ||
3714 	    adev->asic_type == CHIP_NAVI10 ||
3715 	    adev->asic_type == CHIP_NAVI14 ||
3716 	    adev->asic_type == CHIP_NAVI12 ||
3717 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3718 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3719 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3720 #endif
3721 	    adev->asic_type == CHIP_RENOIR ||
3722 	    adev->asic_type == CHIP_RAVEN) {
3723 		/* Fill GFX9 params */
3724 		tiling_info->gfx9.num_pipes =
3725 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3726 		tiling_info->gfx9.num_banks =
3727 			adev->gfx.config.gb_addr_config_fields.num_banks;
3728 		tiling_info->gfx9.pipe_interleave =
3729 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3730 		tiling_info->gfx9.num_shader_engines =
3731 			adev->gfx.config.gb_addr_config_fields.num_se;
3732 		tiling_info->gfx9.max_compressed_frags =
3733 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3734 		tiling_info->gfx9.num_rb_per_se =
3735 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3736 		tiling_info->gfx9.swizzle =
3737 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3738 		tiling_info->gfx9.shaderEnable = 1;
3739 
3740 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3741 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3742 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3743 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3744 #endif
3745 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3746 						plane_size, tiling_info,
3747 						tiling_flags, dcc, address,
3748 						force_disable_dcc);
3749 		if (ret)
3750 			return ret;
3751 	}
3752 
3753 	return 0;
3754 }
3755 
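/*
 * Derive DC blending attributes from the DRM plane state. Per-pixel alpha
 * is only honoured for overlay planes using the premultiplied blend mode
 * with an alpha-capable format, and DRM's 16-bit plane alpha is scaled to
 * the 8-bit global alpha DC expects (e.g. 0x8080 >> 8 = 0x80, ~50%).
 */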
3756 static void
3757 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3758 			       bool *per_pixel_alpha, bool *global_alpha,
3759 			       int *global_alpha_value)
3760 {
3761 	*per_pixel_alpha = false;
3762 	*global_alpha = false;
3763 	*global_alpha_value = 0xff;
3764 
3765 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3766 		return;
3767 
3768 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3769 		static const uint32_t alpha_formats[] = {
3770 			DRM_FORMAT_ARGB8888,
3771 			DRM_FORMAT_RGBA8888,
3772 			DRM_FORMAT_ABGR8888,
3773 		};
3774 		uint32_t format = plane_state->fb->format->format;
3775 		unsigned int i;
3776 
3777 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3778 			if (format == alpha_formats[i]) {
3779 				*per_pixel_alpha = true;
3780 				break;
3781 			}
3782 		}
3783 	}
3784 
3785 	if (plane_state->alpha < 0xffff) {
3786 		*global_alpha = true;
3787 		*global_alpha_value = plane_state->alpha >> 8;
3788 	}
3789 }
3790 
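/*
 * Map the DRM colour encoding/range properties to a DC colour space. RGB
 * surfaces are always treated as sRGB; YCbCr surfaces honour the
 * BT.601/709/2020 encoding, with BT.2020 only supported as full range.
 */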
3791 static int
3792 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3793 			    const enum surface_pixel_format format,
3794 			    enum dc_color_space *color_space)
3795 {
3796 	bool full_range;
3797 
3798 	*color_space = COLOR_SPACE_SRGB;
3799 
3800 	/* DRM color properties only affect non-RGB formats. */
3801 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3802 		return 0;
3803 
3804 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3805 
3806 	switch (plane_state->color_encoding) {
3807 	case DRM_COLOR_YCBCR_BT601:
3808 		if (full_range)
3809 			*color_space = COLOR_SPACE_YCBCR601;
3810 		else
3811 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3812 		break;
3813 
3814 	case DRM_COLOR_YCBCR_BT709:
3815 		if (full_range)
3816 			*color_space = COLOR_SPACE_YCBCR709;
3817 		else
3818 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3819 		break;
3820 
3821 	case DRM_COLOR_YCBCR_BT2020:
3822 		if (full_range)
3823 			*color_space = COLOR_SPACE_2020_YCBCR;
3824 		else
3825 			return -EINVAL;
3826 		break;
3827 
3828 	default:
3829 		return -EINVAL;
3830 	}
3831 
3832 	return 0;
3833 }
3834 
3835 static int
3836 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3837 			    const struct drm_plane_state *plane_state,
3838 			    const uint64_t tiling_flags,
3839 			    struct dc_plane_info *plane_info,
3840 			    struct dc_plane_address *address,
3841 			    bool tmz_surface,
3842 			    bool force_disable_dcc)
3843 {
3844 	const struct drm_framebuffer *fb = plane_state->fb;
3845 	const struct amdgpu_framebuffer *afb =
3846 		to_amdgpu_framebuffer(plane_state->fb);
3847 	struct drm_format_name_buf format_name;
3848 	int ret;
3849 
3850 	memset(plane_info, 0, sizeof(*plane_info));
3851 
3852 	switch (fb->format->format) {
3853 	case DRM_FORMAT_C8:
3854 		plane_info->format =
3855 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3856 		break;
3857 	case DRM_FORMAT_RGB565:
3858 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3859 		break;
3860 	case DRM_FORMAT_XRGB8888:
3861 	case DRM_FORMAT_ARGB8888:
3862 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3863 		break;
3864 	case DRM_FORMAT_XRGB2101010:
3865 	case DRM_FORMAT_ARGB2101010:
3866 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3867 		break;
3868 	case DRM_FORMAT_XBGR2101010:
3869 	case DRM_FORMAT_ABGR2101010:
3870 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3871 		break;
3872 	case DRM_FORMAT_XBGR8888:
3873 	case DRM_FORMAT_ABGR8888:
3874 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3875 		break;
3876 	case DRM_FORMAT_NV21:
3877 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3878 		break;
3879 	case DRM_FORMAT_NV12:
3880 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3881 		break;
3882 	case DRM_FORMAT_P010:
3883 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3884 		break;
3885 	case DRM_FORMAT_XRGB16161616F:
3886 	case DRM_FORMAT_ARGB16161616F:
3887 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3888 		break;
3889 	case DRM_FORMAT_XBGR16161616F:
3890 	case DRM_FORMAT_ABGR16161616F:
3891 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3892 		break;
3893 	default:
3894 		DRM_ERROR(
3895 			"Unsupported screen format %s\n",
3896 			drm_get_format_name(fb->format->format, &format_name));
3897 		return -EINVAL;
3898 	}
3899 
3900 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3901 	case DRM_MODE_ROTATE_0:
3902 		plane_info->rotation = ROTATION_ANGLE_0;
3903 		break;
3904 	case DRM_MODE_ROTATE_90:
3905 		plane_info->rotation = ROTATION_ANGLE_90;
3906 		break;
3907 	case DRM_MODE_ROTATE_180:
3908 		plane_info->rotation = ROTATION_ANGLE_180;
3909 		break;
3910 	case DRM_MODE_ROTATE_270:
3911 		plane_info->rotation = ROTATION_ANGLE_270;
3912 		break;
3913 	default:
3914 		plane_info->rotation = ROTATION_ANGLE_0;
3915 		break;
3916 	}
3917 
3918 	plane_info->visible = true;
3919 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3920 
3921 	plane_info->layer_index = 0;
3922 
3923 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3924 					  &plane_info->color_space);
3925 	if (ret)
3926 		return ret;
3927 
3928 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3929 					   plane_info->rotation, tiling_flags,
3930 					   &plane_info->tiling_info,
3931 					   &plane_info->plane_size,
3932 					   &plane_info->dcc, address, tmz_surface,
3933 					   force_disable_dcc);
3934 	if (ret)
3935 		return ret;
3936 
3937 	fill_blending_from_plane_state(
3938 		plane_state, &plane_info->per_pixel_alpha,
3939 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3940 
3941 	return 0;
3942 }
3943 
3944 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3945 				    struct dc_plane_state *dc_plane_state,
3946 				    struct drm_plane_state *plane_state,
3947 				    struct drm_crtc_state *crtc_state)
3948 {
3949 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3950 	const struct amdgpu_framebuffer *amdgpu_fb =
3951 		to_amdgpu_framebuffer(plane_state->fb);
3952 	struct dc_scaling_info scaling_info;
3953 	struct dc_plane_info plane_info;
3954 	uint64_t tiling_flags;
3955 	int ret;
3956 	bool tmz_surface = false;
3957 	bool force_disable_dcc = false;
3958 
3959 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3960 	if (ret)
3961 		return ret;
3962 
3963 	dc_plane_state->src_rect = scaling_info.src_rect;
3964 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3965 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3966 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3967 
3968 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3969 	if (ret)
3970 		return ret;
3971 
3972 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3973 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3974 					  &plane_info,
3975 					  &dc_plane_state->address,
3976 					  tmz_surface,
3977 					  force_disable_dcc);
3978 	if (ret)
3979 		return ret;
3980 
3981 	dc_plane_state->format = plane_info.format;
3982 	dc_plane_state->color_space = plane_info.color_space;
3984 	dc_plane_state->plane_size = plane_info.plane_size;
3985 	dc_plane_state->rotation = plane_info.rotation;
3986 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3987 	dc_plane_state->stereo_format = plane_info.stereo_format;
3988 	dc_plane_state->tiling_info = plane_info.tiling_info;
3989 	dc_plane_state->visible = plane_info.visible;
3990 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3991 	dc_plane_state->global_alpha = plane_info.global_alpha;
3992 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3993 	dc_plane_state->dcc = plane_info.dcc;
3994 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3995 
3996 	/*
3997 	 * Always set input transfer function, since plane state is refreshed
3998 	 * every time.
3999 	 */
4000 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4001 	if (ret)
4002 		return ret;
4003 
4004 	return 0;
4005 }
4006 
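/*
 * Compute the stream src/dst rectangles for the requested scaling mode.
 * For RMX_ASPECT the axis needing less upscaling wins: e.g. a 1280x1024
 * source on a 1920x1080 timing keeps the full height and gets
 * dst.width = 1280 * 1080 / 1024 = 1350, centered horizontally.
 */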
4007 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4008 					   const struct dm_connector_state *dm_state,
4009 					   struct dc_stream_state *stream)
4010 {
4011 	enum amdgpu_rmx_type rmx_type;
4012 
4013 	struct rect src = { 0 }; /* viewport in composition space */
4014 	struct rect dst = { 0 }; /* stream addressable area */
4015 
4016 	/* no mode. nothing to be done */
4017 	if (!mode)
4018 		return;
4019 
4020 	/* Full screen scaling by default */
4021 	src.width = mode->hdisplay;
4022 	src.height = mode->vdisplay;
4023 	dst.width = stream->timing.h_addressable;
4024 	dst.height = stream->timing.v_addressable;
4025 
4026 	if (dm_state) {
4027 		rmx_type = dm_state->scaling;
4028 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4029 			if (src.width * dst.height <
4030 					src.height * dst.width) {
4031 				/* height needs less upscaling/more downscaling */
4032 				dst.width = src.width *
4033 						dst.height / src.height;
4034 			} else {
4035 				/* width needs less upscaling/more downscaling */
4036 				dst.height = src.height *
4037 						dst.width / src.width;
4038 			}
4039 		} else if (rmx_type == RMX_CENTER) {
4040 			dst = src;
4041 		}
4042 
4043 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4044 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4045 
4046 		if (dm_state->underscan_enable) {
4047 			dst.x += dm_state->underscan_hborder / 2;
4048 			dst.y += dm_state->underscan_vborder / 2;
4049 			dst.width -= dm_state->underscan_hborder;
4050 			dst.height -= dm_state->underscan_vborder;
4051 		}
4052 	}
4053 
4054 	stream->src = src;
4055 	stream->dst = dst;
4056 
4057 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4058 			dst.x, dst.y, dst.width, dst.height);
4059 
4060 }
4061 
4062 static enum dc_color_depth
4063 convert_color_depth_from_display_info(const struct drm_connector *connector,
4064 				      bool is_y420, int requested_bpc)
4065 {
4066 	uint8_t bpc;
4067 
4068 	if (is_y420) {
4069 		bpc = 8;
4070 
4071 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4072 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4073 			bpc = 16;
4074 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4075 			bpc = 12;
4076 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4077 			bpc = 10;
4078 	} else {
4079 		bpc = (uint8_t)connector->display_info.bpc;
4080 		/* Assume 8 bpc by default if no bpc is specified. */
4081 		bpc = bpc ? bpc : 8;
4082 	}
4083 
4084 	if (requested_bpc > 0) {
4085 		/*
4086 		 * Cap display bpc based on the user requested value.
4087 		 *
4088 		 * The value of state->max_bpc may not be correctly updated
4089 		 * depending on when the connector gets added to the state
4090 		 * or if this was called outside of atomic check, so it
4091 		 * can't be used directly.
4092 		 */
4093 		bpc = min_t(u8, bpc, requested_bpc);
4094 
4095 		/* Round down to the nearest even number. */
4096 		bpc = bpc - (bpc & 1);
4097 	}
4098 
4099 	switch (bpc) {
4100 	case 0:
4101 		/*
4102 		 * Temporary workaround: DRM doesn't parse color depth for
4103 		 * EDID revisions before 1.4.
4104 		 * TODO: Fix EDID parsing.
4105 		 */
4106 		return COLOR_DEPTH_888;
4107 	case 6:
4108 		return COLOR_DEPTH_666;
4109 	case 8:
4110 		return COLOR_DEPTH_888;
4111 	case 10:
4112 		return COLOR_DEPTH_101010;
4113 	case 12:
4114 		return COLOR_DEPTH_121212;
4115 	case 14:
4116 		return COLOR_DEPTH_141414;
4117 	case 16:
4118 		return COLOR_DEPTH_161616;
4119 	default:
4120 		return COLOR_DEPTH_UNDEFINED;
4121 	}
4122 }
4123 
4124 static enum dc_aspect_ratio
4125 get_aspect_ratio(const struct drm_display_mode *mode_in)
4126 {
4127 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4128 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4129 }
4130 
4131 static enum dc_color_space
4132 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4133 {
4134 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4135 
4136 	switch (dc_crtc_timing->pixel_encoding)	{
4137 	case PIXEL_ENCODING_YCBCR422:
4138 	case PIXEL_ENCODING_YCBCR444:
4139 	case PIXEL_ENCODING_YCBCR420:
4140 	{
4141 		/*
4142 		 * Per the HDMI spec, 27.03 MHz is the separation point between
4143 		 * HDTV and SDTV; we use YCbCr709 above that pixel clock and
4144 		 * YCbCr601 below it.
4145 		 */
4146 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4147 			if (dc_crtc_timing->flags.Y_ONLY)
4148 				color_space =
4149 					COLOR_SPACE_YCBCR709_LIMITED;
4150 			else
4151 				color_space = COLOR_SPACE_YCBCR709;
4152 		} else {
4153 			if (dc_crtc_timing->flags.Y_ONLY)
4154 				color_space =
4155 					COLOR_SPACE_YCBCR601_LIMITED;
4156 			else
4157 				color_space = COLOR_SPACE_YCBCR601;
4158 		}
4159 
4160 	}
4161 	break;
4162 	case PIXEL_ENCODING_RGB:
4163 		color_space = COLOR_SPACE_SRGB;
4164 		break;
4165 
4166 	default:
4167 		WARN_ON(1);
4168 		break;
4169 	}
4170 
4171 	return color_space;
4172 }
4173 
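/*
 * Walk down from the current colour depth until the normalized pixel
 * clock fits under the sink's maximum TMDS clock. E.g. a 12 bpc stream at
 * 300 MHz normalizes to 300 * 36 / 24 = 450 MHz; if the sink caps out at
 * 340 MHz the loop retries at 10 bpc (375 MHz) and then 8 bpc (300 MHz),
 * which fits.
 */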
4174 static bool adjust_colour_depth_from_display_info(
4175 	struct dc_crtc_timing *timing_out,
4176 	const struct drm_display_info *info)
4177 {
4178 	enum dc_color_depth depth = timing_out->display_color_depth;
4179 	int normalized_clk;
4180 	do {
4181 		normalized_clk = timing_out->pix_clk_100hz / 10;
4182 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4183 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4184 			normalized_clk /= 2;
4185 		/* Adjust the pixel clock per the HDMI spec for the colour depth. */
4186 		switch (depth) {
4187 		case COLOR_DEPTH_888:
4188 			break;
4189 		case COLOR_DEPTH_101010:
4190 			normalized_clk = (normalized_clk * 30) / 24;
4191 			break;
4192 		case COLOR_DEPTH_121212:
4193 			normalized_clk = (normalized_clk * 36) / 24;
4194 			break;
4195 		case COLOR_DEPTH_161616:
4196 			normalized_clk = (normalized_clk * 48) / 24;
4197 			break;
4198 		default:
4199 			/* The above depths are the only ones valid for HDMI. */
4200 			return false;
4201 		}
4202 		if (normalized_clk <= info->max_tmds_clock) {
4203 			timing_out->display_color_depth = depth;
4204 			return true;
4205 		}
4206 	} while (--depth > COLOR_DEPTH_666);
4207 	return false;
4208 }
4209 
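/*
 * Translate a drm_display_mode into DC stream timing: pick the pixel
 * encoding (YCbCr420 is forced for HDMI modes that are 420-only), derive
 * the colour depth from the display info and the requested bpc, and copy
 * the CRTC timing fields. Note pix_clk_100hz = crtc_clock * 10, since
 * crtc_clock is in kHz.
 */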
4210 static void fill_stream_properties_from_drm_display_mode(
4211 	struct dc_stream_state *stream,
4212 	const struct drm_display_mode *mode_in,
4213 	const struct drm_connector *connector,
4214 	const struct drm_connector_state *connector_state,
4215 	const struct dc_stream_state *old_stream,
4216 	int requested_bpc)
4217 {
4218 	struct dc_crtc_timing *timing_out = &stream->timing;
4219 	const struct drm_display_info *info = &connector->display_info;
4220 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4221 	struct hdmi_vendor_infoframe hv_frame;
4222 	struct hdmi_avi_infoframe avi_frame;
4223 
4224 	memset(&hv_frame, 0, sizeof(hv_frame));
4225 	memset(&avi_frame, 0, sizeof(avi_frame));
4226 
4227 	timing_out->h_border_left = 0;
4228 	timing_out->h_border_right = 0;
4229 	timing_out->v_border_top = 0;
4230 	timing_out->v_border_bottom = 0;
4231 	/* TODO: un-hardcode */
4232 	if (drm_mode_is_420_only(info, mode_in)
4233 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4234 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4235 	else if (drm_mode_is_420_also(info, mode_in)
4236 			&& aconnector->force_yuv420_output)
4237 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4238 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4239 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4240 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4241 	else
4242 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4243 
4244 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4245 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4246 		connector,
4247 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4248 		requested_bpc);
4249 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4250 	timing_out->hdmi_vic = 0;
4251 
4252 	if (old_stream) {
4253 		timing_out->vic = old_stream->timing.vic;
4254 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4255 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4256 	} else {
4257 		timing_out->vic = drm_match_cea_mode(mode_in);
4258 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4259 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4260 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4261 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4262 	}
4263 
4264 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4265 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4266 		timing_out->vic = avi_frame.video_code;
4267 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4268 		timing_out->hdmi_vic = hv_frame.vic;
4269 	}
4270 
4271 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4272 	timing_out->h_total = mode_in->crtc_htotal;
4273 	timing_out->h_sync_width =
4274 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4275 	timing_out->h_front_porch =
4276 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4277 	timing_out->v_total = mode_in->crtc_vtotal;
4278 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4279 	timing_out->v_front_porch =
4280 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4281 	timing_out->v_sync_width =
4282 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4283 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4284 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4285 
4286 	stream->output_color_space = get_output_color_space(timing_out);
4287 
4288 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4289 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4290 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4291 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4292 		    drm_mode_is_420_also(info, mode_in) &&
4293 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4294 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4295 			adjust_colour_depth_from_display_info(timing_out, info);
4296 		}
4297 	}
4298 }
4299 
4300 static void fill_audio_info(struct audio_info *audio_info,
4301 			    const struct drm_connector *drm_connector,
4302 			    const struct dc_sink *dc_sink)
4303 {
4304 	int i = 0;
4305 	int cea_revision = 0;
4306 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4307 
4308 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4309 	audio_info->product_id = edid_caps->product_id;
4310 
4311 	cea_revision = drm_connector->display_info.cea_rev;
4312 
4313 	strscpy(audio_info->display_name,
4314 		edid_caps->display_name,
4315 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4316 
4317 	if (cea_revision >= 3) {
4318 		audio_info->mode_count = edid_caps->audio_mode_count;
4319 
4320 		for (i = 0; i < audio_info->mode_count; ++i) {
4321 			audio_info->modes[i].format_code =
4322 					(enum audio_format_code)
4323 					(edid_caps->audio_modes[i].format_code);
4324 			audio_info->modes[i].channel_count =
4325 					edid_caps->audio_modes[i].channel_count;
4326 			audio_info->modes[i].sample_rates.all =
4327 					edid_caps->audio_modes[i].sample_rate;
4328 			audio_info->modes[i].sample_size =
4329 					edid_caps->audio_modes[i].sample_size;
4330 		}
4331 	}
4332 
4333 	audio_info->flags.all = edid_caps->speaker_flags;
4334 
4335 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4336 	if (drm_connector->latency_present[0]) {
4337 		audio_info->video_latency = drm_connector->video_latency[0];
4338 		audio_info->audio_latency = drm_connector->audio_latency[0];
4339 	}
4340 
4341 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4342 
4343 }
4344 
4345 static void
4346 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4347 				      struct drm_display_mode *dst_mode)
4348 {
4349 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4350 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4351 	dst_mode->crtc_clock = src_mode->crtc_clock;
4352 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4353 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4354 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4355 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4356 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4357 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4358 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4359 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4360 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4361 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4362 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4363 }
4364 
4365 static void
4366 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4367 					const struct drm_display_mode *native_mode,
4368 					bool scale_enabled)
4369 {
4370 	if (scale_enabled) {
4371 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4372 	} else if (native_mode->clock == drm_mode->clock &&
4373 			native_mode->htotal == drm_mode->htotal &&
4374 			native_mode->vtotal == drm_mode->vtotal) {
4375 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4376 	} else {
4377 		/* no scaling and no amdgpu-inserted mode: nothing to patch */
4378 	}
4379 }
4380 
4381 static struct dc_sink *
4382 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4383 {
4384 	struct dc_sink_init_data sink_init_data = { 0 };
4385 	struct dc_sink *sink = NULL;
4386 	sink_init_data.link = aconnector->dc_link;
4387 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4388 
4389 	sink = dc_sink_create(&sink_init_data);
4390 	if (!sink) {
4391 		DRM_ERROR("Failed to create sink!\n");
4392 		return NULL;
4393 	}
4394 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4395 
4396 	return sink;
4397 }
4398 
4399 static void set_multisync_trigger_params(
4400 		struct dc_stream_state *stream)
4401 {
4402 	if (stream->triggered_crtc_reset.enabled) {
4403 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4404 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4405 	}
4406 }
4407 
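/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * Refresh is derived from the timing as
 * pix_clk_100hz * 100 / (h_total * v_total): e.g. a 1080p timing of
 * 2200x1125 at 148.5 MHz gives 148500000 / 2475000 = 60 Hz.
 */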
4408 static void set_master_stream(struct dc_stream_state *stream_set[],
4409 			      int stream_count)
4410 {
4411 	int j, highest_rfr = 0, master_stream = 0;
4412 
4413 	for (j = 0;  j < stream_count; j++) {
4414 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4415 			int refresh_rate = 0;
4416 
4417 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4418 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4419 			if (refresh_rate > highest_rfr) {
4420 				highest_rfr = refresh_rate;
4421 				master_stream = j;
4422 			}
4423 		}
4424 	}
4425 	for (j = 0;  j < stream_count; j++) {
4426 		if (stream_set[j])
4427 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4428 	}
4429 }
4430 
4431 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4432 {
4433 	int i = 0;
4434 
4435 	if (context->stream_count < 2)
4436 		return;
4437 	for (i = 0; i < context->stream_count; i++) {
4438 		if (!context->streams[i])
4439 			continue;
4440 		/*
4441 		 * TODO: add a function to read AMD VSDB bits and set
4442 		 * crtc_sync_master.multi_sync_enabled flag
4443 		 * For now it's set to false
4444 		 */
4445 		set_multisync_trigger_params(context->streams[i]);
4446 	}
4447 	set_master_stream(context->streams, context->stream_count);
4448 }
4449 
4450 static struct dc_stream_state *
4451 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4452 		       const struct drm_display_mode *drm_mode,
4453 		       const struct dm_connector_state *dm_state,
4454 		       const struct dc_stream_state *old_stream,
4455 		       int requested_bpc)
4456 {
4457 	struct drm_display_mode *preferred_mode = NULL;
4458 	struct drm_connector *drm_connector;
4459 	const struct drm_connector_state *con_state =
4460 		dm_state ? &dm_state->base : NULL;
4461 	struct dc_stream_state *stream = NULL;
4462 	struct drm_display_mode mode = *drm_mode;
4463 	bool native_mode_found = false;
4464 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4465 	int mode_refresh;
4466 	int preferred_refresh = 0;
4467 #if defined(CONFIG_DRM_AMD_DC_DCN)
4468 	struct dsc_dec_dpcd_caps dsc_caps;
4469 #endif
4470 	uint32_t link_bandwidth_kbps;
4471 
4472 	struct dc_sink *sink = NULL;
4473 	if (aconnector == NULL) {
4474 		DRM_ERROR("aconnector is NULL!\n");
4475 		return stream;
4476 	}
4477 
4478 	drm_connector = &aconnector->base;
4479 
4480 	if (!aconnector->dc_sink) {
4481 		sink = create_fake_sink(aconnector);
4482 		if (!sink)
4483 			return stream;
4484 	} else {
4485 		sink = aconnector->dc_sink;
4486 		dc_sink_retain(sink);
4487 	}
4488 
4489 	stream = dc_create_stream_for_sink(sink);
4490 
4491 	if (stream == NULL) {
4492 		DRM_ERROR("Failed to create stream for sink!\n");
4493 		goto finish;
4494 	}
4495 
4496 	stream->dm_stream_context = aconnector;
4497 
4498 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4499 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4500 
4501 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4502 		/* Search for preferred mode */
4503 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4504 			native_mode_found = true;
4505 			break;
4506 		}
4507 	}
4508 	if (!native_mode_found)
4509 		preferred_mode = list_first_entry_or_null(
4510 				&aconnector->base.modes,
4511 				struct drm_display_mode,
4512 				head);
4513 
4514 	mode_refresh = drm_mode_vrefresh(&mode);
4515 
4516 	if (preferred_mode == NULL) {
4517 		/*
4518 		 * This may not be an error: the use case is when we have no
4519 		 * usermode calls to reset and set mode upon hotplug. In this
4520 		 * case, we call set mode ourselves to restore the previous mode,
4521 		 * and the mode list may not be filled in yet.
4522 		 */
4523 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4524 	} else {
4525 		decide_crtc_timing_for_drm_display_mode(
4526 				&mode, preferred_mode,
4527 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4528 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4529 	}
4530 
4531 	if (!dm_state)
4532 		drm_mode_set_crtcinfo(&mode, 0);
4533 
4534 	/*
4535 	 * If scaling is enabled and the refresh rate didn't change,
4536 	 * we copy the VIC and polarities of the old timing.
4537 	 */
4538 	if (!scale || mode_refresh != preferred_refresh)
4539 		fill_stream_properties_from_drm_display_mode(stream,
4540 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4541 	else
4542 		fill_stream_properties_from_drm_display_mode(stream,
4543 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4544 
4545 	stream->timing.flags.DSC = 0;
4546 
4547 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4548 #if defined(CONFIG_DRM_AMD_DC_DCN)
4549 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4550 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4551 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4552 				      &dsc_caps);
4553 #endif
4554 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4555 							     dc_link_get_link_cap(aconnector->dc_link));
4556 
4557 #if defined(CONFIG_DRM_AMD_DC_DCN)
4558 		if (dsc_caps.is_dsc_supported)
4559 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4560 						  &dsc_caps,
4561 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4562 						  link_bandwidth_kbps,
4563 						  &stream->timing,
4564 						  &stream->timing.dsc_cfg))
4565 				stream->timing.flags.DSC = 1;
4566 #endif
4567 	}
4568 
4569 	update_stream_scaling_settings(&mode, dm_state, stream);
4570 
4571 	fill_audio_info(
4572 		&stream->audio_info,
4573 		drm_connector,
4574 		sink);
4575 
4576 	update_stream_signal(stream, sink);
4577 
4578 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4579 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4580 	if (stream->link->psr_settings.psr_feature_enabled) {
4581 		/*
4582 		 * Decide whether the stream supports VSC SDP colorimetry
4583 		 * before building the VSC infopacket.
4584 		 */
4585 		stream->use_vsc_sdp_for_colorimetry = false;
4586 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4587 			stream->use_vsc_sdp_for_colorimetry =
4588 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4589 		} else {
4590 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4591 				stream->use_vsc_sdp_for_colorimetry = true;
4592 		}
4593 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4594 	}
4595 finish:
4596 	dc_sink_release(sink);
4597 
4598 	return stream;
4599 }
4600 
4601 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4602 {
4603 	drm_crtc_cleanup(crtc);
4604 	kfree(crtc);
4605 }
4606 
4607 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4608 				  struct drm_crtc_state *state)
4609 {
4610 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4611 
4612 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4613 	if (cur->stream)
4614 		dc_stream_release(cur->stream);
4615 
4617 	__drm_atomic_helper_crtc_destroy_state(state);
4618 
4620 	kfree(state);
4621 }
4622 
4623 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4624 {
4625 	struct dm_crtc_state *state;
4626 
4627 	if (crtc->state)
4628 		dm_crtc_destroy_state(crtc, crtc->state);
4629 
4630 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4631 	if (WARN_ON(!state))
4632 		return;
4633 
4634 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4635 }
4636 
4637 static struct drm_crtc_state *
4638 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4639 {
4640 	struct dm_crtc_state *state, *cur;
4641 
4642 	if (WARN_ON(!crtc->state))
4643 		return NULL;
4644 
4645 	cur = to_dm_crtc_state(crtc->state);
4646 
4647 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4648 	if (!state)
4649 		return NULL;
4650 
4651 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4652 
4653 	if (cur->stream) {
4654 		state->stream = cur->stream;
4655 		dc_stream_retain(state->stream);
4656 	}
4657 
4658 	state->active_planes = cur->active_planes;
4659 	state->vrr_params = cur->vrr_params;
4660 	state->vrr_infopacket = cur->vrr_infopacket;
4661 	state->abm_level = cur->abm_level;
4662 	state->vrr_supported = cur->vrr_supported;
4663 	state->freesync_config = cur->freesync_config;
4664 	state->crc_src = cur->crc_src;
4665 	state->cm_has_degamma = cur->cm_has_degamma;
4666 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4667 
4668 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4669 
4670 	return &state->base;
4671 }
4672 
4673 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4674 {
4675 	enum dc_irq_source irq_source;
4676 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4677 	struct amdgpu_device *adev = crtc->dev->dev_private;
4678 	int rc;
4679 
4680 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4681 
4682 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4683 
4684 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4685 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4686 	return rc;
4687 }
4688 
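/*
 * Toggle the CRTC's vblank interrupt, keeping the vupdate interrupt in
 * step: vupdate is only needed while VRR is active, and is always turned
 * off together with vblank.
 */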
4689 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4690 {
4691 	enum dc_irq_source irq_source;
4692 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4693 	struct amdgpu_device *adev = crtc->dev->dev_private;
4694 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4695 	int rc = 0;
4696 
4697 	if (enable) {
4698 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4699 		if (amdgpu_dm_vrr_active(acrtc_state))
4700 			rc = dm_set_vupdate_irq(crtc, true);
4701 	} else {
4702 		/* vblank irq off -> vupdate irq off */
4703 		rc = dm_set_vupdate_irq(crtc, false);
4704 	}
4705 
4706 	if (rc)
4707 		return rc;
4708 
4709 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4710 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4711 }
4712 
4713 static int dm_enable_vblank(struct drm_crtc *crtc)
4714 {
4715 	return dm_set_vblank(crtc, true);
4716 }
4717 
4718 static void dm_disable_vblank(struct drm_crtc *crtc)
4719 {
4720 	dm_set_vblank(crtc, false);
4721 }
4722 
4723 /* Only the options currently available to the driver are implemented */
4724 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4725 	.reset = dm_crtc_reset_state,
4726 	.destroy = amdgpu_dm_crtc_destroy,
4727 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4728 	.set_config = drm_atomic_helper_set_config,
4729 	.page_flip = drm_atomic_helper_page_flip,
4730 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4731 	.atomic_destroy_state = dm_crtc_destroy_state,
4732 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4733 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4734 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4735 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4736 	.enable_vblank = dm_enable_vblank,
4737 	.disable_vblank = dm_disable_vblank,
4738 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4739 };
4740 
4741 static enum drm_connector_status
4742 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4743 {
4744 	bool connected;
4745 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4746 
4747 	/*
4748 	 * Notes:
4749 	 * 1. This interface is NOT called in context of HPD irq.
4750 	 * 2. This interface *is called* in context of a user-mode ioctl, which
4751 	 *    makes it a bad place for *any* MST-related activity.
4752 	 */
4753 
4754 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4755 	    !aconnector->fake_enable)
4756 		connected = (aconnector->dc_sink != NULL);
4757 	else
4758 		connected = (aconnector->base.force == DRM_FORCE_ON);
4759 
4760 	return (connected ? connector_status_connected :
4761 			connector_status_disconnected);
4762 }
4763 
4764 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4765 					    struct drm_connector_state *connector_state,
4766 					    struct drm_property *property,
4767 					    uint64_t val)
4768 {
4769 	struct drm_device *dev = connector->dev;
4770 	struct amdgpu_device *adev = dev->dev_private;
4771 	struct dm_connector_state *dm_old_state =
4772 		to_dm_connector_state(connector->state);
4773 	struct dm_connector_state *dm_new_state =
4774 		to_dm_connector_state(connector_state);
4775 
4776 	int ret = -EINVAL;
4777 
4778 	if (property == dev->mode_config.scaling_mode_property) {
4779 		enum amdgpu_rmx_type rmx_type;
4780 
4781 		switch (val) {
4782 		case DRM_MODE_SCALE_CENTER:
4783 			rmx_type = RMX_CENTER;
4784 			break;
4785 		case DRM_MODE_SCALE_ASPECT:
4786 			rmx_type = RMX_ASPECT;
4787 			break;
4788 		case DRM_MODE_SCALE_FULLSCREEN:
4789 			rmx_type = RMX_FULL;
4790 			break;
4791 		case DRM_MODE_SCALE_NONE:
4792 		default:
4793 			rmx_type = RMX_OFF;
4794 			break;
4795 		}
4796 
4797 		if (dm_old_state->scaling == rmx_type)
4798 			return 0;
4799 
4800 		dm_new_state->scaling = rmx_type;
4801 		ret = 0;
4802 	} else if (property == adev->mode_info.underscan_hborder_property) {
4803 		dm_new_state->underscan_hborder = val;
4804 		ret = 0;
4805 	} else if (property == adev->mode_info.underscan_vborder_property) {
4806 		dm_new_state->underscan_vborder = val;
4807 		ret = 0;
4808 	} else if (property == adev->mode_info.underscan_property) {
4809 		dm_new_state->underscan_enable = val;
4810 		ret = 0;
4811 	} else if (property == adev->mode_info.abm_level_property) {
4812 		dm_new_state->abm_level = val;
4813 		ret = 0;
4814 	}
4815 
4816 	return ret;
4817 }
4818 
4819 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4820 					    const struct drm_connector_state *state,
4821 					    struct drm_property *property,
4822 					    uint64_t *val)
4823 {
4824 	struct drm_device *dev = connector->dev;
4825 	struct amdgpu_device *adev = dev->dev_private;
4826 	struct dm_connector_state *dm_state =
4827 		to_dm_connector_state(state);
4828 	int ret = -EINVAL;
4829 
4830 	if (property == dev->mode_config.scaling_mode_property) {
4831 		switch (dm_state->scaling) {
4832 		case RMX_CENTER:
4833 			*val = DRM_MODE_SCALE_CENTER;
4834 			break;
4835 		case RMX_ASPECT:
4836 			*val = DRM_MODE_SCALE_ASPECT;
4837 			break;
4838 		case RMX_FULL:
4839 			*val = DRM_MODE_SCALE_FULLSCREEN;
4840 			break;
4841 		case RMX_OFF:
4842 		default:
4843 			*val = DRM_MODE_SCALE_NONE;
4844 			break;
4845 		}
4846 		ret = 0;
4847 	} else if (property == adev->mode_info.underscan_hborder_property) {
4848 		*val = dm_state->underscan_hborder;
4849 		ret = 0;
4850 	} else if (property == adev->mode_info.underscan_vborder_property) {
4851 		*val = dm_state->underscan_vborder;
4852 		ret = 0;
4853 	} else if (property == adev->mode_info.underscan_property) {
4854 		*val = dm_state->underscan_enable;
4855 		ret = 0;
4856 	} else if (property == adev->mode_info.abm_level_property) {
4857 		*val = dm_state->abm_level;
4858 		ret = 0;
4859 	}
4860 
4861 	return ret;
4862 }
4863 
4864 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4865 {
4866 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4867 
4868 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4869 }
4870 
4871 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4872 {
4873 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4874 	const struct dc_link *link = aconnector->dc_link;
4875 	struct amdgpu_device *adev = connector->dev->dev_private;
4876 	struct amdgpu_display_manager *dm = &adev->dm;
4877 
4878 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4879 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4880 
4881 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4882 	    link->type != dc_connection_none &&
4883 	    dm->backlight_dev) {
4884 		backlight_device_unregister(dm->backlight_dev);
4885 		dm->backlight_dev = NULL;
4886 	}
4887 #endif
4888 
4889 	if (aconnector->dc_em_sink)
4890 		dc_sink_release(aconnector->dc_em_sink);
4891 	aconnector->dc_em_sink = NULL;
4892 	if (aconnector->dc_sink)
4893 		dc_sink_release(aconnector->dc_sink);
4894 	aconnector->dc_sink = NULL;
4895 
4896 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4897 	drm_connector_unregister(connector);
4898 	drm_connector_cleanup(connector);
4899 	if (aconnector->i2c) {
4900 		i2c_del_adapter(&aconnector->i2c->base);
4901 		kfree(aconnector->i2c);
4902 	}
4903 	kfree(aconnector->dm_dp_aux.aux.name);
4904 
4905 	kfree(connector);
4906 }
4907 
4908 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4909 {
4910 	struct dm_connector_state *state =
4911 		to_dm_connector_state(connector->state);
4912 
4913 	if (connector->state)
4914 		__drm_atomic_helper_connector_destroy_state(connector->state);
4915 
4916 	kfree(state);
4917 
4918 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4919 
4920 	if (state) {
4921 		state->scaling = RMX_OFF;
4922 		state->underscan_enable = false;
4923 		state->underscan_hborder = 0;
4924 		state->underscan_vborder = 0;
4925 		state->base.max_requested_bpc = 8;
4926 		state->vcpi_slots = 0;
4927 		state->pbn = 0;
4928 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4929 			state->abm_level = amdgpu_dm_abm_level;
4930 
4931 		__drm_atomic_helper_connector_reset(connector, &state->base);
4932 	}
4933 }
4934 
4935 struct drm_connector_state *
4936 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4937 {
4938 	struct dm_connector_state *state =
4939 		to_dm_connector_state(connector->state);
4940 
4941 	struct dm_connector_state *new_state =
4942 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4943 
4944 	if (!new_state)
4945 		return NULL;
4946 
4947 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4948 
4949 	new_state->freesync_capable = state->freesync_capable;
4950 	new_state->abm_level = state->abm_level;
4951 	new_state->scaling = state->scaling;
4952 	new_state->underscan_enable = state->underscan_enable;
4953 	new_state->underscan_hborder = state->underscan_hborder;
4954 	new_state->underscan_vborder = state->underscan_vborder;
4955 	new_state->vcpi_slots = state->vcpi_slots;
4956 	new_state->pbn = state->pbn;
4957 	return &new_state->base;
4958 }
4959 
4960 static int
4961 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4962 {
4963 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4964 		to_amdgpu_dm_connector(connector);
4965 	int r;
4966 
4967 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4968 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4969 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4970 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4971 		if (r)
4972 			return r;
4973 	}
4974 
4975 #if defined(CONFIG_DEBUG_FS)
4976 	connector_debugfs_init(amdgpu_dm_connector);
4977 #endif
4978 
4979 	return 0;
4980 }
4981 
4982 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4983 	.reset = amdgpu_dm_connector_funcs_reset,
4984 	.detect = amdgpu_dm_connector_detect,
4985 	.fill_modes = drm_helper_probe_single_connector_modes,
4986 	.destroy = amdgpu_dm_connector_destroy,
4987 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4988 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4989 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4990 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4991 	.late_register = amdgpu_dm_connector_late_register,
4992 	.early_unregister = amdgpu_dm_connector_unregister
4993 };
4994 
4995 static int get_modes(struct drm_connector *connector)
4996 {
4997 	return amdgpu_dm_connector_get_modes(connector);
4998 }
4999 
5000 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5001 {
5002 	struct dc_sink_init_data init_params = {
5003 			.link = aconnector->dc_link,
5004 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5005 	};
5006 	struct edid *edid;
5007 
5008 	if (!aconnector->base.edid_blob_ptr) {
5009 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5010 				aconnector->base.name);
5011 
5012 		aconnector->base.force = DRM_FORCE_OFF;
5013 		aconnector->base.override_edid = false;
5014 		return;
5015 	}
5016 
5017 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5018 
5019 	aconnector->edid = edid;
5020 
5021 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5022 		aconnector->dc_link,
5023 		(uint8_t *)edid,
5024 		(edid->extensions + 1) * EDID_LENGTH,
5025 		&init_params);
5026 
5027 	if (aconnector->base.force == DRM_FORCE_ON) {
5028 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5029 		aconnector->dc_link->local_sink :
5030 		aconnector->dc_em_sink;
5031 		dc_sink_retain(aconnector->dc_sink);
5032 	}
5033 }
5034 
5035 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5036 {
5037 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5038 
5039 	/*
5040 	 * In case of headless boot with force on for DP managed connector
5041 	 * Those settings have to be != 0 to get initial modeset
5042 	 */
5043 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5044 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5045 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5046 	}
5047 
5049 	aconnector->base.override_edid = true;
5050 	create_eml_sink(aconnector);
5051 }
5052 
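/*
 * Create a stream for the sink and validate it with DC, stepping the
 * requested bpc down by 2 on each validation failure (e.g. 10 -> 8 -> 6)
 * until a configuration passes or bpc drops below 6.
 */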
5053 static struct dc_stream_state *
5054 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5055 				const struct drm_display_mode *drm_mode,
5056 				const struct dm_connector_state *dm_state,
5057 				const struct dc_stream_state *old_stream)
5058 {
5059 	struct drm_connector *connector = &aconnector->base;
5060 	struct amdgpu_device *adev = connector->dev->dev_private;
5061 	struct dc_stream_state *stream;
5062 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5063 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5064 	enum dc_status dc_result = DC_OK;
5065 
5066 	do {
5067 		stream = create_stream_for_sink(aconnector, drm_mode,
5068 						dm_state, old_stream,
5069 						requested_bpc);
5070 		if (stream == NULL) {
5071 			DRM_ERROR("Failed to create stream for sink!\n");
5072 			break;
5073 		}
5074 
5075 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5076 
5077 		if (dc_result != DC_OK) {
5078 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5079 				      drm_mode->hdisplay,
5080 				      drm_mode->vdisplay,
5081 				      drm_mode->clock,
5082 				      dc_result,
5083 				      dc_status_to_str(dc_result));
5084 
5085 			dc_stream_release(stream);
5086 			stream = NULL;
5087 			requested_bpc -= 2; /* lower bpc to retry validation */
5088 		}
5089 
5090 	} while (stream == NULL && requested_bpc >= 6);
5091 
5092 	return stream;
5093 }
5094 
5095 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5096 				   struct drm_display_mode *mode)
5097 {
5098 	int result = MODE_ERROR;
5099 	struct dc_sink *dc_sink;
5100 	/* TODO: Unhardcode stream count */
5101 	struct dc_stream_state *stream;
5102 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5103 
5104 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5105 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5106 		return result;
5107 
5108 	/*
5109 	 * Only run this the first time mode_valid is called to initialize
5110 	 * EDID mgmt
5111 	 */
5112 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5113 		!aconnector->dc_em_sink)
5114 		handle_edid_mgmt(aconnector);
5115 
5116 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5117 
5118 	if (dc_sink == NULL) {
5119 		DRM_ERROR("dc_sink is NULL!\n");
5120 		goto fail;
5121 	}
5122 
5123 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5124 	if (stream) {
5125 		dc_stream_release(stream);
5126 		result = MODE_OK;
5127 	}
5128 
5129 fail:
5130 	/* TODO: error handling */
5131 	return result;
5132 }
5133 
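/*
 * Pack the connector's HDR static metadata into a DC info packet. The
 * packed DRM infoframe is 30 bytes: a 4-byte header followed by the fixed
 * 26-byte Dynamic Range and Mastering payload; HDMI and DP differ only in
 * how the packet header bytes are laid out.
 */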
5134 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5135 				struct dc_info_packet *out)
5136 {
5137 	struct hdmi_drm_infoframe frame;
5138 	unsigned char buf[30]; /* 26 + 4 */
5139 	ssize_t len;
5140 	int ret, i;
5141 
5142 	memset(out, 0, sizeof(*out));
5143 
5144 	if (!state->hdr_output_metadata)
5145 		return 0;
5146 
5147 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5148 	if (ret)
5149 		return ret;
5150 
5151 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5152 	if (len < 0)
5153 		return (int)len;
5154 
5155 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5156 	if (len != 30)
5157 		return -EINVAL;
5158 
5159 	/* Prepare the infopacket for DC. */
5160 	switch (state->connector->connector_type) {
5161 	case DRM_MODE_CONNECTOR_HDMIA:
5162 		out->hb0 = 0x87; /* type */
5163 		out->hb1 = 0x01; /* version */
5164 		out->hb2 = 0x1A; /* length */
5165 		out->sb[0] = buf[3]; /* checksum */
5166 		i = 1;
5167 		break;
5168 
5169 	case DRM_MODE_CONNECTOR_DisplayPort:
5170 	case DRM_MODE_CONNECTOR_eDP:
5171 		out->hb0 = 0x00; /* sdp id, zero */
5172 		out->hb1 = 0x87; /* type */
5173 		out->hb2 = 0x1D; /* payload len - 1 */
5174 		out->hb3 = (0x13 << 2); /* sdp version */
5175 		out->sb[0] = 0x01; /* version */
5176 		out->sb[1] = 0x1A; /* length */
5177 		i = 2;
5178 		break;
5179 
5180 	default:
5181 		return -EINVAL;
5182 	}
5183 
5184 	memcpy(&out->sb[i], &buf[4], 26);
5185 	out->valid = true;
5186 
5187 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5188 		       sizeof(out->sb), false);
5189 
5190 	return 0;
5191 }
5192 
5193 static bool
5194 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5195 			  const struct drm_connector_state *new_state)
5196 {
5197 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5198 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5199 
5200 	if (old_blob != new_blob) {
5201 		if (old_blob && new_blob &&
5202 		    old_blob->length == new_blob->length)
5203 			return memcmp(old_blob->data, new_blob->data,
5204 				      old_blob->length);
5205 
5206 		return true;
5207 	}
5208 
5209 	return false;
5210 }
5211 
5212 static int
5213 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5214 				 struct drm_atomic_state *state)
5215 {
5216 	struct drm_connector_state *new_con_state =
5217 		drm_atomic_get_new_connector_state(state, conn);
5218 	struct drm_connector_state *old_con_state =
5219 		drm_atomic_get_old_connector_state(state, conn);
5220 	struct drm_crtc *crtc = new_con_state->crtc;
5221 	struct drm_crtc_state *new_crtc_state;
5222 	int ret;
5223 
5224 	if (!crtc)
5225 		return 0;
5226 
5227 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5228 		struct dc_info_packet hdr_infopacket;
5229 
5230 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5231 		if (ret)
5232 			return ret;
5233 
5234 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5235 		if (IS_ERR(new_crtc_state))
5236 			return PTR_ERR(new_crtc_state);
5237 
5238 		/*
5239 		 * DC considers the stream backends changed if the
5240 		 * static metadata changes. Forcing the modeset also
5241 		 * gives a simple way for userspace to switch from
5242 		 * 8bpc to 10bpc when setting the metadata to enter
5243 		 * or exit HDR.
5244 		 *
5245 		 * Changing the static metadata after it's been
5246 		 * set is permissible, however. So only force a
5247 		 * modeset if we're entering or exiting HDR.
5248 		 */
5249 		new_crtc_state->mode_changed =
5250 			!old_con_state->hdr_output_metadata ||
5251 			!new_con_state->hdr_output_metadata;
5252 	}
5253 
5254 	return 0;
5255 }
5256 
5257 static const struct drm_connector_helper_funcs
5258 amdgpu_dm_connector_helper_funcs = {
5259 	/*
5260 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5261 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5262 	 * are missing after user start lightdm. So we need to renew modes list.
5263 	 * in get_modes call back, not just return the modes count
5264 	 */
5265 	.get_modes = get_modes,
5266 	.mode_valid = amdgpu_dm_connector_mode_valid,
5267 	.atomic_check = amdgpu_dm_connector_atomic_check,
5268 };
5269 
5270 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5271 {
5272 }
5273 
5274 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5275 {
5276 	struct drm_device *dev = new_crtc_state->crtc->dev;
5277 	struct drm_plane *plane;
5278 
5279 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5280 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5281 			return true;
5282 	}
5283 
5284 	return false;
5285 }
5286 
5287 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5288 {
5289 	struct drm_atomic_state *state = new_crtc_state->state;
5290 	struct drm_plane *plane;
5291 	int num_active = 0;
5292 
5293 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5294 		struct drm_plane_state *new_plane_state;
5295 
5296 		/* Cursor planes are "fake". */
5297 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5298 			continue;
5299 
5300 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5301 
5302 		if (!new_plane_state) {
5303 			/*
5304 			 * The plane is enable on the CRTC and hasn't changed
5305 			 * state. This means that it previously passed
5306 			 * validation and is therefore enabled.
5307 			 */
5308 			num_active += 1;
5309 			continue;
5310 		}
5311 
5312 		/* We need a framebuffer to be considered enabled. */
5313 		num_active += (new_plane_state->fb != NULL);
5314 	}
5315 
5316 	return num_active;
5317 }
5318 
5319 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5320 					 struct drm_crtc_state *new_crtc_state)
5321 {
5322 	struct dm_crtc_state *dm_new_crtc_state =
5323 		to_dm_crtc_state(new_crtc_state);
5324 
5325 	dm_new_crtc_state->active_planes = 0;
5326 
5327 	if (!dm_new_crtc_state->stream)
5328 		return;
5329 
5330 	dm_new_crtc_state->active_planes =
5331 		count_crtc_active_planes(new_crtc_state);
5332 }
5333 
5334 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5335 				       struct drm_crtc_state *state)
5336 {
5337 	struct amdgpu_device *adev = crtc->dev->dev_private;
5338 	struct dc *dc = adev->dm.dc;
5339 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5340 	int ret = -EINVAL;
5341 
5342 	dm_update_crtc_active_planes(crtc, state);
5343 
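	/* A CRTC without a stream must never require a modeset. */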
5344 	if (unlikely(!dm_crtc_state->stream &&
5345 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5346 		WARN_ON(1);
5347 		return ret;
5348 	}
5349 
5350 	/* In some use cases, like reset, no stream is attached */
5351 	if (!dm_crtc_state->stream)
5352 		return 0;
5353 
5354 	/*
5355 	 * We want at least one hardware plane enabled to use
5356 	 * the stream with a cursor enabled.
5357 	 */
5358 	if (state->enable && state->active &&
5359 	    does_crtc_have_active_cursor(state) &&
5360 	    dm_crtc_state->active_planes == 0)
5361 		return -EINVAL;
5362 
5363 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5364 		return 0;
5365 
5366 	return ret;
5367 }
5368 
5369 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5370 				      const struct drm_display_mode *mode,
5371 				      struct drm_display_mode *adjusted_mode)
5372 {
5373 	return true;
5374 }
5375 
5376 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5377 	.disable = dm_crtc_helper_disable,
5378 	.atomic_check = dm_crtc_helper_atomic_check,
5379 	.mode_fixup = dm_crtc_helper_mode_fixup,
5380 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5381 };
5382 
5383 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5384 {
5385 
5386 }
5387 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}

	return 0;
}
5408 
5409 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5410 					  struct drm_crtc_state *crtc_state,
5411 					  struct drm_connector_state *conn_state)
5412 {
5413 	struct drm_atomic_state *state = crtc_state->state;
5414 	struct drm_connector *connector = conn_state->connector;
5415 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5416 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5417 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5418 	struct drm_dp_mst_topology_mgr *mst_mgr;
5419 	struct drm_dp_mst_port *mst_port;
5420 	enum dc_color_depth color_depth;
5421 	int clock, bpp = 0;
5422 	bool is_y420 = false;
5423 
5424 	if (!aconnector->port || !aconnector->dc_sink)
5425 		return 0;
5426 
5427 	mst_port = aconnector->port;
5428 	mst_mgr = &aconnector->mst_port->mst_mgr;
5429 
5430 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5431 		return 0;
5432 
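	/*
	 * Recompute the PBN from the mode clock and bpp unless this atomic
	 * state was duplicated (e.g. restored at resume), in which case the
	 * previously computed PBN is kept.
	 */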
5433 	if (!state->duplicated) {
5434 		int max_bpc = conn_state->max_requested_bpc;
5435 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5436 				aconnector->force_yuv420_output;
5437 		color_depth = convert_color_depth_from_display_info(connector,
5438 								    is_y420,
5439 								    max_bpc);
5440 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5441 		clock = adjusted_mode->clock;
5442 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5443 	}
5444 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5445 									   mst_mgr,
5446 									   mst_port,
5447 									   dm_new_connector_state->pbn,
5448 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5449 	if (dm_new_connector_state->vcpi_slots < 0) {
5450 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5451 		return dm_new_connector_state->vcpi_slots;
5452 	}
5453 	return 0;
5454 }
5455 
5456 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5457 	.disable = dm_encoder_helper_disable,
5458 	.atomic_check = dm_encoder_helper_atomic_check
5459 };
5460 
5461 #if defined(CONFIG_DRM_AMD_DC_DCN)
5462 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5463 					    struct dc_state *dc_state)
5464 {
5465 	struct dc_stream_state *stream = NULL;
5466 	struct drm_connector *connector;
5467 	struct drm_connector_state *new_con_state, *old_con_state;
5468 	struct amdgpu_dm_connector *aconnector;
5469 	struct dm_connector_state *dm_conn_state;
5470 	int i, j, clock, bpp;
5471 	int vcpi, pbn_div, pbn = 0;
5472 
5473 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5474 
5475 		aconnector = to_amdgpu_dm_connector(connector);
5476 
5477 		if (!aconnector->port)
5478 			continue;
5479 
5480 		if (!new_con_state || !new_con_state->crtc)
5481 			continue;
5482 
5483 		dm_conn_state = to_dm_connector_state(new_con_state);
5484 
5485 		for (j = 0; j < dc_state->stream_count; j++) {
5486 			stream = dc_state->streams[j];
5487 			if (!stream)
5488 				continue;
5489 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5491 				break;
5492 
5493 			stream = NULL;
5494 		}
5495 
5496 		if (!stream)
5497 			continue;
5498 
5499 		if (stream->timing.flags.DSC != 1) {
5500 			drm_dp_mst_atomic_enable_dsc(state,
5501 						     aconnector->port,
5502 						     dm_conn_state->pbn,
5503 						     0,
5504 						     false);
5505 			continue;
5506 		}
5507 
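		/*
		 * DSC is enabled: recompute the PBN from the DSC target
		 * bitrate. bits_per_pixel is in units of 1/16 bpp, which
		 * drm_dp_calc_pbn_mode() expects when dsc is true.
		 */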
5508 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5509 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5510 		clock = stream->timing.pix_clk_100hz / 10;
5511 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5512 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5513 						    aconnector->port,
5514 						    pbn, pbn_div,
5515 						    true);
5516 		if (vcpi < 0)
5517 			return vcpi;
5518 
5519 		dm_conn_state->pbn = pbn;
5520 		dm_conn_state->vcpi_slots = vcpi;
5521 	}
5522 	return 0;
5523 }
5524 #endif
5525 
5526 static void dm_drm_plane_reset(struct drm_plane *plane)
5527 {
5528 	struct dm_plane_state *amdgpu_state = NULL;
5529 
5530 	if (plane->state)
5531 		plane->funcs->atomic_destroy_state(plane, plane->state);
5532 
5533 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5534 	WARN_ON(amdgpu_state == NULL);
5535 
5536 	if (amdgpu_state)
5537 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5538 }
5539 
5540 static struct drm_plane_state *
5541 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5542 {
5543 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5544 
5545 	old_dm_plane_state = to_dm_plane_state(plane->state);
5546 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5547 	if (!dm_plane_state)
5548 		return NULL;
5549 
5550 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5551 
5552 	if (old_dm_plane_state->dc_state) {
5553 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5554 		dc_plane_state_retain(dm_plane_state->dc_state);
5555 	}
5556 
5557 	return &dm_plane_state->base;
5558 }
5559 
5560 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5561 				struct drm_plane_state *state)
5562 {
5563 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5564 
5565 	if (dm_plane_state->dc_state)
5566 		dc_plane_state_release(dm_plane_state->dc_state);
5567 
5568 	drm_atomic_helper_plane_destroy_state(plane, state);
5569 }
5570 
5571 static const struct drm_plane_funcs dm_plane_funcs = {
5572 	.update_plane	= drm_atomic_helper_update_plane,
5573 	.disable_plane	= drm_atomic_helper_disable_plane,
5574 	.destroy	= drm_primary_helper_destroy,
5575 	.reset = dm_drm_plane_reset,
5576 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5577 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5578 };
5579 
5580 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5581 				      struct drm_plane_state *new_state)
5582 {
5583 	struct amdgpu_framebuffer *afb;
5584 	struct drm_gem_object *obj;
5585 	struct amdgpu_device *adev;
5586 	struct amdgpu_bo *rbo;
5587 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5588 	struct list_head list;
5589 	struct ttm_validate_buffer tv;
5590 	struct ww_acquire_ctx ticket;
5591 	uint64_t tiling_flags;
5592 	uint32_t domain;
5593 	int r;
5594 	bool tmz_surface = false;
5595 	bool force_disable_dcc = false;
5596 
5597 	dm_plane_state_old = to_dm_plane_state(plane->state);
5598 	dm_plane_state_new = to_dm_plane_state(new_state);
5599 
5600 	if (!new_state->fb) {
5601 		DRM_DEBUG_DRIVER("No FB bound\n");
5602 		return 0;
5603 	}
5604 
5605 	afb = to_amdgpu_framebuffer(new_state->fb);
5606 	obj = new_state->fb->obj[0];
5607 	rbo = gem_to_amdgpu_bo(obj);
5608 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5609 	INIT_LIST_HEAD(&list);
5610 
5611 	tv.bo = &rbo->tbo;
5612 	tv.num_shared = 1;
5613 	list_add(&tv.head, &list);
5614 
5615 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5616 	if (r) {
5617 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5618 		return r;
5619 	}
5620 
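	/*
	 * Cursor BOs are pinned to VRAM; other planes may use any
	 * domain supported for display scanout.
	 */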
5621 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5622 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5623 	else
5624 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5625 
5626 	r = amdgpu_bo_pin(rbo, domain);
5627 	if (unlikely(r != 0)) {
5628 		if (r != -ERESTARTSYS)
5629 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5630 		ttm_eu_backoff_reservation(&ticket, &list);
5631 		return r;
5632 	}
5633 
5634 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5635 	if (unlikely(r != 0)) {
5636 		amdgpu_bo_unpin(rbo);
5637 		ttm_eu_backoff_reservation(&ticket, &list);
5638 		DRM_ERROR("%p bind failed\n", rbo);
5639 		return r;
5640 	}
5641 
5642 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5643 
5644 	tmz_surface = amdgpu_bo_encrypted(rbo);
5645 
5646 	ttm_eu_backoff_reservation(&ticket, &list);
5647 
5648 	afb->address = amdgpu_bo_gpu_offset(rbo);
5649 
5650 	amdgpu_bo_ref(rbo);
5651 
5652 	if (dm_plane_state_new->dc_state &&
5653 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5654 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5655 
5656 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5657 		fill_plane_buffer_attributes(
5658 			adev, afb, plane_state->format, plane_state->rotation,
5659 			tiling_flags, &plane_state->tiling_info,
5660 			&plane_state->plane_size, &plane_state->dcc,
5661 			&plane_state->address, tmz_surface,
5662 			force_disable_dcc);
5663 	}
5664 
5665 	return 0;
5666 }
5667 
5668 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5669 				       struct drm_plane_state *old_state)
5670 {
5671 	struct amdgpu_bo *rbo;
5672 	int r;
5673 
5674 	if (!old_state->fb)
5675 		return;
5676 
5677 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5678 	r = amdgpu_bo_reserve(rbo, false);
5679 	if (unlikely(r)) {
5680 		DRM_ERROR("failed to reserve rbo before unpin\n");
5681 		return;
5682 	}
5683 
5684 	amdgpu_bo_unpin(rbo);
5685 	amdgpu_bo_unreserve(rbo);
5686 	amdgpu_bo_unref(&rbo);
5687 }
5688 
5689 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5690 				       struct drm_crtc_state *new_crtc_state)
5691 {
5692 	int max_downscale = 0;
5693 	int max_upscale = INT_MAX;
5694 
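	/*
	 * A min scale of 0 and a max scale of INT_MAX (16.16 fixed point)
	 * effectively leave plane scaling unrestricted here.
	 */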
5695 	/* TODO: These should be checked against DC plane caps */
5696 	return drm_atomic_helper_check_plane_state(
5697 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5698 }
5699 
5700 static int dm_plane_atomic_check(struct drm_plane *plane,
5701 				 struct drm_plane_state *state)
5702 {
5703 	struct amdgpu_device *adev = plane->dev->dev_private;
5704 	struct dc *dc = adev->dm.dc;
5705 	struct dm_plane_state *dm_plane_state;
5706 	struct dc_scaling_info scaling_info;
5707 	struct drm_crtc_state *new_crtc_state;
5708 	int ret;
5709 
5710 	dm_plane_state = to_dm_plane_state(state);
5711 
5712 	if (!dm_plane_state->dc_state)
5713 		return 0;
5714 
5715 	new_crtc_state =
5716 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5717 	if (!new_crtc_state)
5718 		return -EINVAL;
5719 
5720 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5721 	if (ret)
5722 		return ret;
5723 
5724 	ret = fill_dc_scaling_info(state, &scaling_info);
5725 	if (ret)
5726 		return ret;
5727 
5728 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5729 		return 0;
5730 
5731 	return -EINVAL;
5732 }
5733 
5734 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5735 				       struct drm_plane_state *new_plane_state)
5736 {
5737 	/* Only support async updates on cursor planes. */
5738 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5739 		return -EINVAL;
5740 
5741 	return 0;
5742 }
5743 
5744 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5745 					 struct drm_plane_state *new_state)
5746 {
5747 	struct drm_plane_state *old_state =
5748 		drm_atomic_get_old_plane_state(new_state->state, plane);
5749 
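	/*
	 * An async update bypasses the usual commit machinery: copy the new
	 * framebuffer and coordinates straight into the current plane state,
	 * then program the cursor immediately.
	 */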
5750 	swap(plane->state->fb, new_state->fb);
5751 
5752 	plane->state->src_x = new_state->src_x;
5753 	plane->state->src_y = new_state->src_y;
5754 	plane->state->src_w = new_state->src_w;
5755 	plane->state->src_h = new_state->src_h;
5756 	plane->state->crtc_x = new_state->crtc_x;
5757 	plane->state->crtc_y = new_state->crtc_y;
5758 	plane->state->crtc_w = new_state->crtc_w;
5759 	plane->state->crtc_h = new_state->crtc_h;
5760 
5761 	handle_cursor_update(plane, old_state);
5762 }
5763 
5764 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5765 	.prepare_fb = dm_plane_helper_prepare_fb,
5766 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5767 	.atomic_check = dm_plane_atomic_check,
5768 	.atomic_async_check = dm_plane_atomic_async_check,
5769 	.atomic_async_update = dm_plane_atomic_async_update
5770 };
5771 
5772 /*
5773  * TODO: these are currently initialized to rgb formats only.
5774  * For future use cases we should either initialize them dynamically based on
5775  * plane capabilities, or initialize this array to all formats, so internal drm
5776  * check will succeed, and let DC implement proper check
5777  */
5778 static const uint32_t rgb_formats[] = {
5779 	DRM_FORMAT_XRGB8888,
5780 	DRM_FORMAT_ARGB8888,
5781 	DRM_FORMAT_RGBA8888,
5782 	DRM_FORMAT_XRGB2101010,
5783 	DRM_FORMAT_XBGR2101010,
5784 	DRM_FORMAT_ARGB2101010,
5785 	DRM_FORMAT_ABGR2101010,
5786 	DRM_FORMAT_XBGR8888,
5787 	DRM_FORMAT_ABGR8888,
5788 	DRM_FORMAT_RGB565,
5789 };
5790 
5791 static const uint32_t overlay_formats[] = {
5792 	DRM_FORMAT_XRGB8888,
5793 	DRM_FORMAT_ARGB8888,
5794 	DRM_FORMAT_RGBA8888,
5795 	DRM_FORMAT_XBGR8888,
5796 	DRM_FORMAT_ABGR8888,
5797 	DRM_FORMAT_RGB565
5798 };
5799 
5800 static const u32 cursor_formats[] = {
5801 	DRM_FORMAT_ARGB8888
5802 };
5803 
5804 static int get_plane_formats(const struct drm_plane *plane,
5805 			     const struct dc_plane_cap *plane_cap,
5806 			     uint32_t *formats, int max_formats)
5807 {
5808 	int i, num_formats = 0;
5809 
5810 	/*
5811 	 * TODO: Query support for each group of formats directly from
5812 	 * DC plane caps. This will require adding more formats to the
5813 	 * caps list.
5814 	 */
5815 
5816 	switch (plane->type) {
5817 	case DRM_PLANE_TYPE_PRIMARY:
5818 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5819 			if (num_formats >= max_formats)
5820 				break;
5821 
5822 			formats[num_formats++] = rgb_formats[i];
5823 		}
5824 
5825 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5826 			formats[num_formats++] = DRM_FORMAT_NV12;
5827 		if (plane_cap && plane_cap->pixel_format_support.p010)
5828 			formats[num_formats++] = DRM_FORMAT_P010;
5829 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5830 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5831 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5832 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5833 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5834 		}
5835 		break;
5836 
5837 	case DRM_PLANE_TYPE_OVERLAY:
5838 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5839 			if (num_formats >= max_formats)
5840 				break;
5841 
5842 			formats[num_formats++] = overlay_formats[i];
5843 		}
5844 		break;
5845 
5846 	case DRM_PLANE_TYPE_CURSOR:
5847 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5848 			if (num_formats >= max_formats)
5849 				break;
5850 
5851 			formats[num_formats++] = cursor_formats[i];
5852 		}
5853 		break;
5854 	}
5855 
5856 	return num_formats;
5857 }
5858 
5859 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5860 				struct drm_plane *plane,
5861 				unsigned long possible_crtcs,
5862 				const struct dc_plane_cap *plane_cap)
5863 {
5864 	uint32_t formats[32];
5865 	int num_formats;
5866 	int res = -EPERM;
5867 	unsigned int supported_rotations;
5868 
5869 	num_formats = get_plane_formats(plane, plane_cap, formats,
5870 					ARRAY_SIZE(formats));
5871 
5872 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5873 				       &dm_plane_funcs, formats, num_formats,
5874 				       NULL, plane->type, NULL);
5875 	if (res)
5876 		return res;
5877 
5878 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5879 	    plane_cap && plane_cap->per_pixel_alpha) {
5880 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5881 					  BIT(DRM_MODE_BLEND_PREMULTI);
5882 
5883 		drm_plane_create_alpha_property(plane);
5884 		drm_plane_create_blend_mode_property(plane, blend_caps);
5885 	}
5886 
5887 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5888 	    plane_cap &&
5889 	    (plane_cap->pixel_format_support.nv12 ||
5890 	     plane_cap->pixel_format_support.p010)) {
5891 		/* This only affects YUV formats. */
5892 		drm_plane_create_color_properties(
5893 			plane,
5894 			BIT(DRM_COLOR_YCBCR_BT601) |
5895 			BIT(DRM_COLOR_YCBCR_BT709) |
5896 			BIT(DRM_COLOR_YCBCR_BT2020),
5897 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5898 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5899 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5900 	}
5901 
5902 	supported_rotations =
5903 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5904 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5905 
5906 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5907 					   supported_rotations);
5908 
5909 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5910 
5911 	/* Create (reset) the plane state */
5912 	if (plane->funcs->reset)
5913 		plane->funcs->reset(plane);
5914 
5915 	return 0;
5916 }
5917 
5918 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5919 			       struct drm_plane *plane,
5920 			       uint32_t crtc_index)
5921 {
5922 	struct amdgpu_crtc *acrtc = NULL;
5923 	struct drm_plane *cursor_plane;
5924 
5925 	int res = -ENOMEM;
5926 
5927 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5928 	if (!cursor_plane)
5929 		goto fail;
5930 
5931 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5932 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5933 
5934 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5935 	if (!acrtc)
5936 		goto fail;
5937 
5938 	res = drm_crtc_init_with_planes(
5939 			dm->ddev,
5940 			&acrtc->base,
5941 			plane,
5942 			cursor_plane,
5943 			&amdgpu_dm_crtc_funcs, NULL);
5944 
5945 	if (res)
5946 		goto fail;
5947 
5948 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5949 
	/* Create (reset) the crtc state */
5951 	if (acrtc->base.funcs->reset)
5952 		acrtc->base.funcs->reset(&acrtc->base);
5953 
5954 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5955 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5956 
5957 	acrtc->crtc_id = crtc_index;
5958 	acrtc->base.enabled = false;
5959 	acrtc->otg_inst = -1;
5960 
5961 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5962 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5963 				   true, MAX_COLOR_LUT_ENTRIES);
5964 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5965 
5966 	return 0;
5967 
5968 fail:
5969 	kfree(acrtc);
5970 	kfree(cursor_plane);
5971 	return res;
5972 }
5973 
5974 
5975 static int to_drm_connector_type(enum signal_type st)
5976 {
5977 	switch (st) {
5978 	case SIGNAL_TYPE_HDMI_TYPE_A:
5979 		return DRM_MODE_CONNECTOR_HDMIA;
5980 	case SIGNAL_TYPE_EDP:
5981 		return DRM_MODE_CONNECTOR_eDP;
5982 	case SIGNAL_TYPE_LVDS:
5983 		return DRM_MODE_CONNECTOR_LVDS;
5984 	case SIGNAL_TYPE_RGB:
5985 		return DRM_MODE_CONNECTOR_VGA;
5986 	case SIGNAL_TYPE_DISPLAY_PORT:
5987 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5988 		return DRM_MODE_CONNECTOR_DisplayPort;
5989 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5990 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5991 		return DRM_MODE_CONNECTOR_DVID;
5992 	case SIGNAL_TYPE_VIRTUAL:
5993 		return DRM_MODE_CONNECTOR_VIRTUAL;
5994 
5995 	default:
5996 		return DRM_MODE_CONNECTOR_Unknown;
5997 	}
5998 }
5999 
6000 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6001 {
6002 	struct drm_encoder *encoder;
6003 
6004 	/* There is only one encoder per connector */
6005 	drm_connector_for_each_possible_encoder(connector, encoder)
6006 		return encoder;
6007 
6008 	return NULL;
6009 }
6010 
6011 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6012 {
6013 	struct drm_encoder *encoder;
6014 	struct amdgpu_encoder *amdgpu_encoder;
6015 
6016 	encoder = amdgpu_dm_connector_to_encoder(connector);
6017 
6018 	if (encoder == NULL)
6019 		return;
6020 
6021 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6022 
6023 	amdgpu_encoder->native_mode.clock = 0;
6024 
6025 	if (!list_empty(&connector->probed_modes)) {
6026 		struct drm_display_mode *preferred_mode = NULL;
6027 
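		/*
		 * The probed modes were sorted by drm_mode_sort(), so the
		 * preferred mode, if any, sits at the head of the list and
		 * only the first entry needs to be checked.
		 */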
6028 		list_for_each_entry(preferred_mode,
6029 				    &connector->probed_modes,
6030 				    head) {
6031 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6032 				amdgpu_encoder->native_mode = *preferred_mode;
6033 
6034 			break;
6035 		}
6036 
6037 	}
6038 }
6039 
6040 static struct drm_display_mode *
6041 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6042 			     char *name,
6043 			     int hdisplay, int vdisplay)
6044 {
6045 	struct drm_device *dev = encoder->dev;
6046 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6047 	struct drm_display_mode *mode = NULL;
6048 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6049 
6050 	mode = drm_mode_duplicate(dev, native_mode);
6051 
6052 	if (mode == NULL)
6053 		return NULL;
6054 
6055 	mode->hdisplay = hdisplay;
6056 	mode->vdisplay = vdisplay;
6057 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6058 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6059 
	return mode;
}
6063 
6064 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6065 						 struct drm_connector *connector)
6066 {
6067 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6068 	struct drm_display_mode *mode = NULL;
6069 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6070 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6071 				to_amdgpu_dm_connector(connector);
6072 	int i;
6073 	int n;
6074 	struct mode_size {
6075 		char name[DRM_DISPLAY_MODE_LEN];
6076 		int w;
6077 		int h;
6078 	} common_modes[] = {
6079 		{  "640x480",  640,  480},
6080 		{  "800x600",  800,  600},
6081 		{ "1024x768", 1024,  768},
6082 		{ "1280x720", 1280,  720},
6083 		{ "1280x800", 1280,  800},
6084 		{"1280x1024", 1280, 1024},
6085 		{ "1440x900", 1440,  900},
6086 		{"1680x1050", 1680, 1050},
6087 		{"1600x1200", 1600, 1200},
6088 		{"1920x1080", 1920, 1080},
6089 		{"1920x1200", 1920, 1200}
6090 	};
6091 
6092 	n = ARRAY_SIZE(common_modes);
6093 
6094 	for (i = 0; i < n; i++) {
6095 		struct drm_display_mode *curmode = NULL;
6096 		bool mode_existed = false;
6097 
6098 		if (common_modes[i].w > native_mode->hdisplay ||
6099 		    common_modes[i].h > native_mode->vdisplay ||
6100 		   (common_modes[i].w == native_mode->hdisplay &&
6101 		    common_modes[i].h == native_mode->vdisplay))
6102 			continue;
6103 
6104 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6105 			if (common_modes[i].w == curmode->hdisplay &&
6106 			    common_modes[i].h == curmode->vdisplay) {
6107 				mode_existed = true;
6108 				break;
6109 			}
6110 		}
6111 
6112 		if (mode_existed)
6113 			continue;
6114 
6115 		mode = amdgpu_dm_create_common_mode(encoder,
6116 				common_modes[i].name, common_modes[i].w,
6117 				common_modes[i].h);
6118 		drm_mode_probed_add(connector, mode);
6119 		amdgpu_dm_connector->num_modes++;
6120 	}
6121 }
6122 
6123 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6124 					      struct edid *edid)
6125 {
6126 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6127 			to_amdgpu_dm_connector(connector);
6128 
6129 	if (edid) {
6130 		/* empty probed_modes */
6131 		INIT_LIST_HEAD(&connector->probed_modes);
6132 		amdgpu_dm_connector->num_modes =
6133 				drm_add_edid_modes(connector, edid);
6134 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could be of a higher, preferred resolution, e.g.
		 * 3840x2160 in the base EDID preferred timing and 4096x2160
		 * in a later DID extension block.
		 */
6143 		drm_mode_sort(&connector->probed_modes);
6144 		amdgpu_dm_get_native_mode(connector);
6145 	} else {
6146 		amdgpu_dm_connector->num_modes = 0;
6147 	}
6148 }
6149 
6150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6151 {
6152 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6153 			to_amdgpu_dm_connector(connector);
6154 	struct drm_encoder *encoder;
6155 	struct edid *edid = amdgpu_dm_connector->edid;
6156 
6157 	encoder = amdgpu_dm_connector_to_encoder(connector);
6158 
6159 	if (!edid || !drm_edid_is_valid(edid)) {
6160 		amdgpu_dm_connector->num_modes =
6161 				drm_add_modes_noedid(connector, 640, 480);
6162 	} else {
6163 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6164 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6165 	}
6166 	amdgpu_dm_fbc_init(connector);
6167 
6168 	return amdgpu_dm_connector->num_modes;
6169 }
6170 
6171 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6172 				     struct amdgpu_dm_connector *aconnector,
6173 				     int connector_type,
6174 				     struct dc_link *link,
6175 				     int link_index)
6176 {
6177 	struct amdgpu_device *adev = dm->ddev->dev_private;
6178 
6179 	/*
6180 	 * Some of the properties below require access to state, like bpc.
6181 	 * Allocate some default initial connector state with our reset helper.
6182 	 */
6183 	if (aconnector->base.funcs->reset)
6184 		aconnector->base.funcs->reset(&aconnector->base);
6185 
6186 	aconnector->connector_id = link_index;
6187 	aconnector->dc_link = link;
6188 	aconnector->base.interlace_allowed = false;
6189 	aconnector->base.doublescan_allowed = false;
6190 	aconnector->base.stereo_allowed = false;
6191 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6192 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6193 	aconnector->audio_inst = -1;
6194 	mutex_init(&aconnector->hpd_lock);
6195 
6196 	/*
6197 	 * configure support HPD hot plug connector_>polled default value is 0
6198 	 * which means HPD hot plug not supported
6199 	 */
6200 	switch (connector_type) {
6201 	case DRM_MODE_CONNECTOR_HDMIA:
6202 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6203 		aconnector->base.ycbcr_420_allowed =
6204 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6205 		break;
6206 	case DRM_MODE_CONNECTOR_DisplayPort:
6207 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6208 		aconnector->base.ycbcr_420_allowed =
6209 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6210 		break;
6211 	case DRM_MODE_CONNECTOR_DVID:
6212 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6213 		break;
6214 	default:
6215 		break;
6216 	}
6217 
6218 	drm_object_attach_property(&aconnector->base.base,
6219 				dm->ddev->mode_config.scaling_mode_property,
6220 				DRM_MODE_SCALE_NONE);
6221 
6222 	drm_object_attach_property(&aconnector->base.base,
6223 				adev->mode_info.underscan_property,
6224 				UNDERSCAN_OFF);
6225 	drm_object_attach_property(&aconnector->base.base,
6226 				adev->mode_info.underscan_hborder_property,
6227 				0);
6228 	drm_object_attach_property(&aconnector->base.base,
6229 				adev->mode_info.underscan_vborder_property,
6230 				0);
6231 
6232 	if (!aconnector->mst_port)
6233 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6234 
6235 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6236 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6237 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6238 
6239 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6240 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6241 		drm_object_attach_property(&aconnector->base.base,
6242 				adev->mode_info.abm_level_property, 0);
6243 	}
6244 
6245 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6246 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6247 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6248 		drm_object_attach_property(
6249 			&aconnector->base.base,
6250 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6251 
6252 		if (!aconnector->mst_port)
6253 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6254 
6255 #ifdef CONFIG_DRM_AMD_DC_HDCP
6256 		if (adev->dm.hdcp_workqueue)
6257 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6258 #endif
6259 	}
6260 }
6261 
6262 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6263 			      struct i2c_msg *msgs, int num)
6264 {
6265 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6266 	struct ddc_service *ddc_service = i2c->ddc_service;
6267 	struct i2c_command cmd;
6268 	int i;
6269 	int result = -EIO;
6270 
6271 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6272 
6273 	if (!cmd.payloads)
6274 		return result;
6275 
6276 	cmd.number_of_payloads = num;
6277 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6278 	cmd.speed = 100;
6279 
6280 	for (i = 0; i < num; i++) {
6281 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6282 		cmd.payloads[i].address = msgs[i].addr;
6283 		cmd.payloads[i].length = msgs[i].len;
6284 		cmd.payloads[i].data = msgs[i].buf;
6285 	}
6286 
6287 	if (dc_submit_i2c(
6288 			ddc_service->ctx->dc,
6289 			ddc_service->ddc_pin->hw_info.ddc_channel,
6290 			&cmd))
6291 		result = num;
6292 
6293 	kfree(cmd.payloads);
6294 	return result;
6295 }
6296 
6297 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6298 {
6299 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6300 }
6301 
6302 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6303 	.master_xfer = amdgpu_dm_i2c_xfer,
6304 	.functionality = amdgpu_dm_i2c_func,
6305 };
6306 
6307 static struct amdgpu_i2c_adapter *
6308 create_i2c(struct ddc_service *ddc_service,
6309 	   int link_index,
6310 	   int *res)
6311 {
6312 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6313 	struct amdgpu_i2c_adapter *i2c;
6314 
6315 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6316 	if (!i2c)
6317 		return NULL;
6318 	i2c->base.owner = THIS_MODULE;
6319 	i2c->base.class = I2C_CLASS_DDC;
6320 	i2c->base.dev.parent = &adev->pdev->dev;
6321 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6322 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6323 	i2c_set_adapdata(&i2c->base, i2c);
6324 	i2c->ddc_service = ddc_service;
6325 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6326 
6327 	return i2c;
6328 }
6329 
6330 
6331 /*
6332  * Note: this function assumes that dc_link_detect() was called for the
6333  * dc_link which will be represented by this aconnector.
6334  */
6335 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6336 				    struct amdgpu_dm_connector *aconnector,
6337 				    uint32_t link_index,
6338 				    struct amdgpu_encoder *aencoder)
6339 {
6340 	int res = 0;
6341 	int connector_type;
6342 	struct dc *dc = dm->dc;
6343 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6344 	struct amdgpu_i2c_adapter *i2c;
6345 
6346 	link->priv = aconnector;
6347 
6348 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6349 
6350 	i2c = create_i2c(link->ddc, link->link_index, &res);
6351 	if (!i2c) {
6352 		DRM_ERROR("Failed to create i2c adapter data\n");
6353 		return -ENOMEM;
6354 	}
6355 
6356 	aconnector->i2c = i2c;
6357 	res = i2c_add_adapter(&i2c->base);
6358 
6359 	if (res) {
6360 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6361 		goto out_free;
6362 	}
6363 
6364 	connector_type = to_drm_connector_type(link->connector_signal);
6365 
6366 	res = drm_connector_init_with_ddc(
6367 			dm->ddev,
6368 			&aconnector->base,
6369 			&amdgpu_dm_connector_funcs,
6370 			connector_type,
6371 			&i2c->base);
6372 
6373 	if (res) {
6374 		DRM_ERROR("connector_init failed\n");
6375 		aconnector->connector_id = -1;
6376 		goto out_free;
6377 	}
6378 
6379 	drm_connector_helper_add(
6380 			&aconnector->base,
6381 			&amdgpu_dm_connector_helper_funcs);
6382 
6383 	amdgpu_dm_connector_init_helper(
6384 		dm,
6385 		aconnector,
6386 		connector_type,
6387 		link,
6388 		link_index);
6389 
6390 	drm_connector_attach_encoder(
6391 		&aconnector->base, &aencoder->base);
6392 
6393 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6394 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6395 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6396 
6397 out_free:
6398 	if (res) {
6399 		kfree(i2c);
6400 		aconnector->i2c = NULL;
6401 	}
6402 	return res;
6403 }
6404 
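/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * i.e. (1 << num_crtc) - 1, capped at six CRTCs.
 */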
6405 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6406 {
6407 	switch (adev->mode_info.num_crtc) {
6408 	case 1:
6409 		return 0x1;
6410 	case 2:
6411 		return 0x3;
6412 	case 3:
6413 		return 0x7;
6414 	case 4:
6415 		return 0xf;
6416 	case 5:
6417 		return 0x1f;
6418 	case 6:
6419 	default:
6420 		return 0x3f;
6421 	}
6422 }
6423 
6424 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6425 				  struct amdgpu_encoder *aencoder,
6426 				  uint32_t link_index)
6427 {
6428 	struct amdgpu_device *adev = dev->dev_private;
6429 
6430 	int res = drm_encoder_init(dev,
6431 				   &aencoder->base,
6432 				   &amdgpu_dm_encoder_funcs,
6433 				   DRM_MODE_ENCODER_TMDS,
6434 				   NULL);
6435 
6436 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6437 
6438 	if (!res)
6439 		aencoder->encoder_id = link_index;
6440 	else
6441 		aencoder->encoder_id = -1;
6442 
6443 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6444 
6445 	return res;
6446 }
6447 
6448 static void manage_dm_interrupts(struct amdgpu_device *adev,
6449 				 struct amdgpu_crtc *acrtc,
6450 				 bool enable)
6451 {
6452 	/*
6453 	 * We have no guarantee that the frontend index maps to the same
6454 	 * backend index - some even map to more than one.
6455 	 *
6456 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6457 	 */
6458 	int irq_type =
6459 		amdgpu_display_crtc_idx_to_irq_type(
6460 			adev,
6461 			acrtc->crtc_id);
6462 
6463 	if (enable) {
6464 		drm_crtc_vblank_on(&acrtc->base);
6465 		amdgpu_irq_get(
6466 			adev,
6467 			&adev->pageflip_irq,
6468 			irq_type);
	} else {
6471 		amdgpu_irq_put(
6472 			adev,
6473 			&adev->pageflip_irq,
6474 			irq_type);
6475 		drm_crtc_vblank_off(&acrtc->base);
6476 	}
6477 }
6478 
6479 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6480 				      struct amdgpu_crtc *acrtc)
6481 {
6482 	int irq_type =
6483 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6484 
6485 	/**
6486 	 * This reads the current state for the IRQ and force reapplies
6487 	 * the setting to hardware.
6488 	 */
6489 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6490 }
6491 
6492 static bool
6493 is_scaling_state_different(const struct dm_connector_state *dm_state,
6494 			   const struct dm_connector_state *old_dm_state)
6495 {
6496 	if (dm_state->scaling != old_dm_state->scaling)
6497 		return true;
6498 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6499 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6500 			return true;
6501 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6502 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6503 			return true;
6504 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6505 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6506 		return true;
6507 	return false;
6508 }
6509 
6510 #ifdef CONFIG_DRM_AMD_DC_HDCP
6511 static bool is_content_protection_different(struct drm_connector_state *state,
6512 					    const struct drm_connector_state *old_state,
6513 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6514 {
6515 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6516 
6517 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6518 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6519 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6520 		return true;
6521 	}
6522 
	/* CP is being re-enabled, ignore this. */
6524 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6525 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6526 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6527 		return false;
6528 	}
6529 
6530 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6531 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6532 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6533 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6534 
	/*
	 * Check if something is connected/enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 */
6538 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6539 	    aconnector->dc_sink != NULL)
6540 		return true;
6541 
6542 	if (old_state->content_protection == state->content_protection)
6543 		return false;
6544 
6545 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6546 		return true;
6547 
6548 	return false;
6549 }
6550 
6551 #endif
6552 static void remove_stream(struct amdgpu_device *adev,
6553 			  struct amdgpu_crtc *acrtc,
6554 			  struct dc_stream_state *stream)
6555 {
6556 	/* this is the update mode case */
6557 
6558 	acrtc->otg_inst = -1;
6559 	acrtc->enabled = false;
6560 }
6561 
6562 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6563 			       struct dc_cursor_position *position)
6564 {
6565 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6566 	int x, y;
6567 	int xorigin = 0, yorigin = 0;
6568 
6569 	position->enable = false;
6570 	position->x = 0;
6571 	position->y = 0;
6572 
6573 	if (!crtc || !plane->state->fb)
6574 		return 0;
6575 
6576 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6577 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6578 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6579 			  __func__,
6580 			  plane->state->crtc_w,
6581 			  plane->state->crtc_h);
6582 		return -EINVAL;
6583 	}
6584 
6585 	x = plane->state->crtc_x;
6586 	y = plane->state->crtc_y;
6587 
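	/*
	 * If the cursor is entirely off-screen, leave it disabled. Otherwise
	 * clamp negative coordinates to zero and move the hotspot so the
	 * visible portion of the cursor is placed correctly.
	 */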
6588 	if (x <= -amdgpu_crtc->max_cursor_width ||
6589 	    y <= -amdgpu_crtc->max_cursor_height)
6590 		return 0;
6591 
6592 	if (x < 0) {
6593 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6594 		x = 0;
6595 	}
6596 	if (y < 0) {
6597 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6598 		y = 0;
6599 	}
6600 	position->enable = true;
6601 	position->translate_by_source = true;
6602 	position->x = x;
6603 	position->y = y;
6604 	position->x_hotspot = xorigin;
6605 	position->y_hotspot = yorigin;
6606 
6607 	return 0;
6608 }
6609 
6610 static void handle_cursor_update(struct drm_plane *plane,
6611 				 struct drm_plane_state *old_plane_state)
6612 {
6613 	struct amdgpu_device *adev = plane->dev->dev_private;
6614 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6615 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6616 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6617 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6618 	uint64_t address = afb ? afb->address : 0;
6619 	struct dc_cursor_position position;
6620 	struct dc_cursor_attributes attributes;
6621 	int ret;
6622 
6623 	if (!plane->state->fb && !old_plane_state->fb)
6624 		return;
6625 
6626 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6627 			 __func__,
6628 			 amdgpu_crtc->crtc_id,
6629 			 plane->state->crtc_w,
6630 			 plane->state->crtc_h);
6631 
6632 	ret = get_cursor_position(plane, crtc, &position);
6633 	if (ret)
6634 		return;
6635 
6636 	if (!position.enable) {
6637 		/* turn off cursor */
6638 		if (crtc_state && crtc_state->stream) {
6639 			mutex_lock(&adev->dm.dc_lock);
6640 			dc_stream_set_cursor_position(crtc_state->stream,
6641 						      &position);
6642 			mutex_unlock(&adev->dm.dc_lock);
6643 		}
6644 		return;
6645 	}
6646 
6647 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6648 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6649 
6650 	memset(&attributes, 0, sizeof(attributes));
6651 	attributes.address.high_part = upper_32_bits(address);
6652 	attributes.address.low_part  = lower_32_bits(address);
6653 	attributes.width             = plane->state->crtc_w;
6654 	attributes.height            = plane->state->crtc_h;
6655 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6656 	attributes.rotation_angle    = 0;
6657 	attributes.attribute_flags.value = 0;
6658 
6659 	attributes.pitch = attributes.width;
6660 
6661 	if (crtc_state->stream) {
6662 		mutex_lock(&adev->dm.dc_lock);
6663 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6664 							 &attributes))
6665 			DRM_ERROR("DC failed to set cursor attributes\n");
6666 
6667 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6668 						   &position))
6669 			DRM_ERROR("DC failed to set cursor position\n");
6670 		mutex_unlock(&adev->dm.dc_lock);
6671 	}
6672 }
6673 
6674 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6675 {
6676 
6677 	assert_spin_locked(&acrtc->base.dev->event_lock);
6678 	WARN_ON(acrtc->event);
6679 
6680 	acrtc->event = acrtc->base.state->event;
6681 
6682 	/* Set the flip status */
6683 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6684 
6685 	/* Mark this event as consumed */
6686 	acrtc->base.state->event = NULL;
6687 
6688 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6689 						 acrtc->crtc_id);
6690 }
6691 
6692 static void update_freesync_state_on_stream(
6693 	struct amdgpu_display_manager *dm,
6694 	struct dm_crtc_state *new_crtc_state,
6695 	struct dc_stream_state *new_stream,
6696 	struct dc_plane_state *surface,
6697 	u32 flip_timestamp_in_us)
6698 {
6699 	struct mod_vrr_params vrr_params;
6700 	struct dc_info_packet vrr_infopacket = {0};
6701 	struct amdgpu_device *adev = dm->adev;
6702 	unsigned long flags;
6703 
6704 	if (!new_stream)
6705 		return;
6706 
6707 	/*
6708 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6709 	 * For now it's sufficient to just guard against these conditions.
6710 	 */
6711 
6712 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6713 		return;
6714 
6715 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6716 	vrr_params = new_crtc_state->vrr_params;
6717 
6718 	if (surface) {
6719 		mod_freesync_handle_preflip(
6720 			dm->freesync_module,
6721 			surface,
6722 			new_stream,
6723 			flip_timestamp_in_us,
6724 			&vrr_params);
6725 
6726 		if (adev->family < AMDGPU_FAMILY_AI &&
6727 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6728 			mod_freesync_handle_v_update(dm->freesync_module,
6729 						     new_stream, &vrr_params);
6730 
6731 			/* Need to call this before the frame ends. */
6732 			dc_stream_adjust_vmin_vmax(dm->dc,
6733 						   new_crtc_state->stream,
6734 						   &vrr_params.adjust);
6735 		}
6736 	}
6737 
6738 	mod_freesync_build_vrr_infopacket(
6739 		dm->freesync_module,
6740 		new_stream,
6741 		&vrr_params,
6742 		PACKET_TYPE_VRR,
6743 		TRANSFER_FUNC_UNKNOWN,
6744 		&vrr_infopacket);
6745 
6746 	new_crtc_state->freesync_timing_changed |=
6747 		(memcmp(&new_crtc_state->vrr_params.adjust,
6748 			&vrr_params.adjust,
6749 			sizeof(vrr_params.adjust)) != 0);
6750 
6751 	new_crtc_state->freesync_vrr_info_changed |=
6752 		(memcmp(&new_crtc_state->vrr_infopacket,
6753 			&vrr_infopacket,
6754 			sizeof(vrr_infopacket)) != 0);
6755 
6756 	new_crtc_state->vrr_params = vrr_params;
6757 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6758 
6759 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6760 	new_stream->vrr_infopacket = vrr_infopacket;
6761 
6762 	if (new_crtc_state->freesync_vrr_info_changed)
6763 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6764 			      new_crtc_state->base.crtc->base.id,
6765 			      (int)new_crtc_state->base.vrr_enabled,
6766 			      (int)vrr_params.state);
6767 
6768 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6769 }
6770 
6771 static void pre_update_freesync_state_on_stream(
6772 	struct amdgpu_display_manager *dm,
6773 	struct dm_crtc_state *new_crtc_state)
6774 {
6775 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6776 	struct mod_vrr_params vrr_params;
6777 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6778 	struct amdgpu_device *adev = dm->adev;
6779 	unsigned long flags;
6780 
6781 	if (!new_stream)
6782 		return;
6783 
6784 	/*
6785 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6786 	 * For now it's sufficient to just guard against these conditions.
6787 	 */
6788 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6789 		return;
6790 
6791 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6792 	vrr_params = new_crtc_state->vrr_params;
6793 
6794 	if (new_crtc_state->vrr_supported &&
6795 	    config.min_refresh_in_uhz &&
6796 	    config.max_refresh_in_uhz) {
6797 		config.state = new_crtc_state->base.vrr_enabled ?
6798 			VRR_STATE_ACTIVE_VARIABLE :
6799 			VRR_STATE_INACTIVE;
6800 	} else {
6801 		config.state = VRR_STATE_UNSUPPORTED;
6802 	}
6803 
6804 	mod_freesync_build_vrr_params(dm->freesync_module,
6805 				      new_stream,
6806 				      &config, &vrr_params);
6807 
6808 	new_crtc_state->freesync_timing_changed |=
6809 		(memcmp(&new_crtc_state->vrr_params.adjust,
6810 			&vrr_params.adjust,
6811 			sizeof(vrr_params.adjust)) != 0);
6812 
6813 	new_crtc_state->vrr_params = vrr_params;
6814 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6815 }
6816 
6817 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6818 					    struct dm_crtc_state *new_state)
6819 {
6820 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6821 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6822 
6823 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after a disable would compute bogus vblank/pflip
		 * timestamps if the toggle happened inside the display front
		 * porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at the end of vblank.
		 */
6832 		dm_set_vupdate_irq(new_state->base.crtc, true);
6833 		drm_crtc_vblank_get(new_state->base.crtc);
6834 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6835 				 __func__, new_state->base.crtc->base.id);
6836 	} else if (old_vrr_active && !new_vrr_active) {
6837 		/* Transition VRR active -> inactive:
6838 		 * Allow vblank irq disable again for fixed refresh rate.
6839 		 */
6840 		dm_set_vupdate_irq(new_state->base.crtc, false);
6841 		drm_crtc_vblank_put(new_state->base.crtc);
6842 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6843 				 __func__, new_state->base.crtc->base.id);
6844 	}
6845 }
6846 
6847 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6848 {
6849 	struct drm_plane *plane;
6850 	struct drm_plane_state *old_plane_state, *new_plane_state;
6851 	int i;
6852 
6853 	/*
6854 	 * TODO: Make this per-stream so we don't issue redundant updates for
6855 	 * commits with multiple streams.
6856 	 */
6857 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6858 				       new_plane_state, i)
6859 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6860 			handle_cursor_update(plane, old_plane_state);
6861 }
6862 
6863 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6864 				    struct dc_state *dc_state,
6865 				    struct drm_device *dev,
6866 				    struct amdgpu_display_manager *dm,
6867 				    struct drm_crtc *pcrtc,
6868 				    bool wait_for_vblank)
6869 {
6870 	uint32_t i;
6871 	uint64_t timestamp_ns;
6872 	struct drm_plane *plane;
6873 	struct drm_plane_state *old_plane_state, *new_plane_state;
6874 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6875 	struct drm_crtc_state *new_pcrtc_state =
6876 			drm_atomic_get_new_crtc_state(state, pcrtc);
6877 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6878 	struct dm_crtc_state *dm_old_crtc_state =
6879 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6880 	int planes_count = 0, vpos, hpos;
6881 	long r;
6882 	unsigned long flags;
6883 	struct amdgpu_bo *abo;
6884 	uint64_t tiling_flags;
6885 	bool tmz_surface = false;
6886 	uint32_t target_vblank, last_flip_vblank;
6887 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6888 	bool pflip_present = false;
6889 	struct {
6890 		struct dc_surface_update surface_updates[MAX_SURFACES];
6891 		struct dc_plane_info plane_infos[MAX_SURFACES];
6892 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6893 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6894 		struct dc_stream_update stream_update;
6895 	} *bundle;
6896 
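	/*
	 * The update bundle is too large for the stack, so allocate it on
	 * the heap for the duration of this commit.
	 */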
6897 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6898 
6899 	if (!bundle) {
6900 		dm_error("Failed to allocate update bundle\n");
6901 		goto cleanup;
6902 	}
6903 
6904 	/*
6905 	 * Disable the cursor first if we're disabling all the planes.
6906 	 * It'll remain on the screen after the planes are re-enabled
6907 	 * if we don't.
6908 	 */
6909 	if (acrtc_state->active_planes == 0)
6910 		amdgpu_dm_commit_cursors(state);
6911 
6912 	/* update planes when needed */
6913 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6914 		struct drm_crtc *crtc = new_plane_state->crtc;
6915 		struct drm_crtc_state *new_crtc_state;
6916 		struct drm_framebuffer *fb = new_plane_state->fb;
6917 		bool plane_needs_flip;
6918 		struct dc_plane_state *dc_plane;
6919 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6920 
6921 		/* Cursor plane is handled after stream updates */
6922 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6923 			continue;
6924 
6925 		if (!fb || !crtc || pcrtc != crtc)
6926 			continue;
6927 
6928 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6929 		if (!new_crtc_state->active)
6930 			continue;
6931 
6932 		dc_plane = dm_new_plane_state->dc_state;
6933 
6934 		bundle->surface_updates[planes_count].surface = dc_plane;
6935 		if (new_pcrtc_state->color_mgmt_changed) {
6936 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6937 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6938 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6939 		}
6940 
6941 		fill_dc_scaling_info(new_plane_state,
6942 				     &bundle->scaling_infos[planes_count]);
6943 
6944 		bundle->surface_updates[planes_count].scaling_info =
6945 			&bundle->scaling_infos[planes_count];
6946 
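		/*
		 * A page flip is only needed when the plane already had a
		 * framebuffer and still has one; plane enables and disables
		 * are handled as plain surface updates instead.
		 */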
6947 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6948 
6949 		pflip_present = pflip_present || plane_needs_flip;
6950 
6951 		if (!plane_needs_flip) {
6952 			planes_count += 1;
6953 			continue;
6954 		}
6955 
6956 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6957 
6958 		/*
6959 		 * Wait for all fences on this FB. Use a bounded wait to avoid
6960 		 * a deadlock during GPU reset, when this fence will not signal
6961 		 * but we still hold the reservation lock for the BO.
6962 		 */
6963 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6964 							false,
6965 							msecs_to_jiffies(5000));
6966 		if (unlikely(r <= 0))
6967 			DRM_ERROR("Waiting for fences timed out!\n");
6968 
6969 		/*
6970 		 * TODO: This might fail and hence had better not be used;
6971 		 * wait explicitly on fences instead. In general this should
6972 		 * also be done for blocking commits, as per the framework
6973 		 * helpers.
6974 		 */
6975 		r = amdgpu_bo_reserve(abo, true);
6976 		if (unlikely(r != 0))
6977 			DRM_ERROR("failed to reserve buffer before flip\n");
6978 
6979 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6980 
6981 		tmz_surface = amdgpu_bo_encrypted(abo);
6982 
6983 		amdgpu_bo_unreserve(abo);
6984 
6985 		fill_dc_plane_info_and_addr(
6986 			dm->adev, new_plane_state, tiling_flags,
6987 			&bundle->plane_infos[planes_count],
6988 			&bundle->flip_addrs[planes_count].address,
6989 			tmz_surface,
6990 			false);
6991 
6992 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6993 				 new_plane_state->plane->index,
6994 				 bundle->plane_infos[planes_count].dcc.enable);
6995 
6996 		bundle->surface_updates[planes_count].plane_info =
6997 			&bundle->plane_infos[planes_count];
6998 
6999 		/*
7000 		 * Only allow immediate flips for fast updates that don't
7001 		 * change FB pitch, DCC state, rotation or mirroring.
7002 		 */
7003 		bundle->flip_addrs[planes_count].flip_immediate =
7004 			crtc->state->async_flip &&
7005 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7006 
7007 		timestamp_ns = ktime_get_ns();
7008 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7009 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7010 		bundle->surface_updates[planes_count].surface = dc_plane;
7011 
7012 		if (!bundle->surface_updates[planes_count].surface) {
7013 			DRM_ERROR("No surface for CRTC: id=%d\n",
7014 					acrtc_attach->crtc_id);
7015 			continue;
7016 		}
7017 
7018 		if (plane == pcrtc->primary)
7019 			update_freesync_state_on_stream(
7020 				dm,
7021 				acrtc_state,
7022 				acrtc_state->stream,
7023 				dc_plane,
7024 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7025 
7026 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7027 				 __func__,
7028 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7029 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7030 
7031 		planes_count += 1;
7032 
7033 	}
7034 
7035 	if (pflip_present) {
7036 		if (!vrr_active) {
7037 			/* Use old throttling in non-vrr fixed refresh rate mode
7038 			 * to keep flip scheduling based on target vblank counts
7039 			 * working in a backwards compatible way, e.g., for
7040 			 * clients using the GLX_OML_sync_control extension or
7041 			 * DRI3/Present extension with defined target_msc.
7042 			 */
7043 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7044 		}
7045 		else {
7046 			/* For variable refresh rate mode only:
7047 			 * Get vblank of last completed flip to avoid > 1 vrr
7048 			 * flips per video frame by use of throttling, but allow
7049 			 * flip programming anywhere in the possibly large
7050 			 * variable vrr vblank interval for fine-grained flip
7051 			 * timing control and more opportunity to avoid stutter
7052 			 * on late submission of flips.
7053 			 */
7054 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7055 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7056 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7057 		}
7058 
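		/*
		 * wait_for_vblank contributes 0 or 1 to the target: a
		 * throttled flip targets the vblank after the last completed
		 * flip, while an unthrottled flip may be programmed
		 * immediately.
		 */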
7059 		target_vblank = last_flip_vblank + wait_for_vblank;
7060 
7061 		/*
7062 		 * Wait until we're out of the vertical blank period before the one
7063 		 * targeted by the flip
7064 		 */
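		/*
		 * The signed cast in the loop condition below keeps the
		 * comparison correct across 32-bit wrap of the vblank
		 * counter, e.g. target_vblank == 1 against a counter of
		 * 0xffffffff still yields a small positive difference.
		 */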
7065 		while ((acrtc_attach->enabled &&
7066 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7067 							    0, &vpos, &hpos, NULL,
7068 							    NULL, &pcrtc->hwmode)
7069 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7070 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7071 			(int)(target_vblank -
7072 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7073 			usleep_range(1000, 1100);
7074 		}
7075 
7076 		/*
7077 		 * Prepare the flip event for the pageflip interrupt to handle.
7078 		 *
7079 		 * This only works in the case where we've already turned on the
7080 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7081 		 * from 0 -> n planes we have to skip a hardware generated event
7082 		 * and rely on sending it from software.
7083 		 */
7084 		if (acrtc_attach->base.state->event &&
7085 		    acrtc_state->active_planes > 0) {
7086 			drm_crtc_vblank_get(pcrtc);
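			/*
			 * This vblank reference is expected to be dropped by
			 * the pageflip IRQ handler once the flip completes.
			 */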
7087 
7088 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7089 
7090 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7091 			prepare_flip_isr(acrtc_attach);
7092 
7093 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7094 		}
7095 
7096 		if (acrtc_state->stream) {
7097 			if (acrtc_state->freesync_vrr_info_changed)
7098 				bundle->stream_update.vrr_infopacket =
7099 					&acrtc_state->stream->vrr_infopacket;
7100 		}
7101 	}
7102 
7103 	/* Update the planes if changed or disable if we don't have any. */
7104 	if ((planes_count || acrtc_state->active_planes == 0) &&
7105 		acrtc_state->stream) {
7106 		bundle->stream_update.stream = acrtc_state->stream;
7107 		if (new_pcrtc_state->mode_changed) {
7108 			bundle->stream_update.src = acrtc_state->stream->src;
7109 			bundle->stream_update.dst = acrtc_state->stream->dst;
7110 		}
7111 
7112 		if (new_pcrtc_state->color_mgmt_changed) {
7113 			/*
7114 			 * TODO: This isn't fully correct since we've actually
7115 			 * already modified the stream in place.
7116 			 */
7117 			bundle->stream_update.gamut_remap =
7118 				&acrtc_state->stream->gamut_remap_matrix;
7119 			bundle->stream_update.output_csc_transform =
7120 				&acrtc_state->stream->csc_color_matrix;
7121 			bundle->stream_update.out_transfer_func =
7122 				acrtc_state->stream->out_transfer_func;
7123 		}
7124 
7125 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7126 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7127 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7128 
7129 		/*
7130 		 * If FreeSync state on the stream has changed then we need to
7131 		 * re-adjust the min/max bounds now that DC doesn't handle this
7132 		 * as part of commit.
7133 		 */
7134 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7135 		    amdgpu_dm_vrr_active(acrtc_state)) {
7136 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7137 			dc_stream_adjust_vmin_vmax(
7138 				dm->dc, acrtc_state->stream,
7139 				&acrtc_state->vrr_params.adjust);
7140 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7141 		}
7142 		mutex_lock(&dm->dc_lock);
7143 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7144 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7145 			amdgpu_dm_psr_disable(acrtc_state->stream);
7146 
7147 		dc_commit_updates_for_stream(dm->dc,
7148 						     bundle->surface_updates,
7149 						     planes_count,
7150 						     acrtc_state->stream,
7151 						     &bundle->stream_update,
7152 						     dc_state);
7153 
7154 		/*
7155 		 * Enable or disable the interrupts on the backend.
7156 		 *
7157 		 * Most pipes are put into power gating when unused.
7158 		 *
7159 		 * When power gating is enabled on a pipe, its interrupt
7160 		 * enablement state is lost once the pipe is power gated.
7161 		 *
7162 		 * So we need to update the IRQ control state in hardware
7163 		 * whenever the pipe turns on (since it could be previously
7164 		 * power gated) or off (since some pipes can't be power gated
7165 		 * on some ASICs).
7166 		 */
7167 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7168 			dm_update_pflip_irq_state(
7169 				(struct amdgpu_device *)dev->dev_private,
7170 				acrtc_attach);
7171 
7172 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7173 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7174 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7175 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7176 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7177 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7178 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7179 			amdgpu_dm_psr_enable(acrtc_state->stream);
7180 		}
7181 
7182 		mutex_unlock(&dm->dc_lock);
7183 	}
7184 
7185 	/*
7186 	 * Update cursor state *after* programming all the planes.
7187 	 * This avoids redundant programming in the case where we're going
7188 	 * to be disabling a single plane - those pipes are being disabled.
7189 	 */
7190 	if (acrtc_state->active_planes)
7191 		amdgpu_dm_commit_cursors(state);
7192 
7193 cleanup:
7194 	kfree(bundle);
7195 }
7196 
7197 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7198 				   struct drm_atomic_state *state)
7199 {
7200 	struct amdgpu_device *adev = dev->dev_private;
7201 	struct amdgpu_dm_connector *aconnector;
7202 	struct drm_connector *connector;
7203 	struct drm_connector_state *old_con_state, *new_con_state;
7204 	struct drm_crtc_state *new_crtc_state;
7205 	struct dm_crtc_state *new_dm_crtc_state;
7206 	const struct dc_stream_status *status;
7207 	int i, inst;
7208 
7209 	/* Notify device removals. */
7210 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7211 		if (old_con_state->crtc != new_con_state->crtc) {
7212 			/* CRTC changes require notification. */
7213 			goto notify;
7214 		}
7215 
7216 		if (!new_con_state->crtc)
7217 			continue;
7218 
7219 		new_crtc_state = drm_atomic_get_new_crtc_state(
7220 			state, new_con_state->crtc);
7221 
7222 		if (!new_crtc_state)
7223 			continue;
7224 
7225 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7226 			continue;
7227 
7228 	notify:
7229 		aconnector = to_amdgpu_dm_connector(connector);
7230 
7231 		mutex_lock(&adev->dm.audio_lock);
7232 		inst = aconnector->audio_inst;
7233 		aconnector->audio_inst = -1;
7234 		mutex_unlock(&adev->dm.audio_lock);
7235 
7236 		amdgpu_dm_audio_eld_notify(adev, inst);
7237 	}
7238 
7239 	/* Notify audio device additions. */
7240 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7241 		if (!new_con_state->crtc)
7242 			continue;
7243 
7244 		new_crtc_state = drm_atomic_get_new_crtc_state(
7245 			state, new_con_state->crtc);
7246 
7247 		if (!new_crtc_state)
7248 			continue;
7249 
7250 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7251 			continue;
7252 
7253 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7254 		if (!new_dm_crtc_state->stream)
7255 			continue;
7256 
7257 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7258 		if (!status)
7259 			continue;
7260 
7261 		aconnector = to_amdgpu_dm_connector(connector);
7262 
7263 		mutex_lock(&adev->dm.audio_lock);
7264 		inst = status->audio_inst;
7265 		aconnector->audio_inst = inst;
7266 		mutex_unlock(&adev->dm.audio_lock);
7267 
7268 		amdgpu_dm_audio_eld_notify(adev, inst);
7269 	}
7270 }
7271 
7272 /**
7273  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7274  * @crtc_state: the DRM CRTC state
7275  * @stream_state: the DC stream state
7276  *
7277  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7278  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7279  */
7280 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7281 						struct dc_stream_state *stream_state)
7282 {
7283 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7284 }
7285 
7286 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7287 				   struct drm_atomic_state *state,
7288 				   bool nonblock)
7289 {
7290 	struct drm_crtc *crtc;
7291 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7292 	struct amdgpu_device *adev = dev->dev_private;
7293 	int i;
7294 
7295 	/*
7296 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7297 	 * a modeset, being disabled, or have no active planes.
7298 	 *
7299 	 * It's done in atomic commit rather than commit tail for now since
7300 	 * some of these interrupt handlers access the current CRTC state and
7301 	 * potentially the stream pointer itself.
7302 	 *
7303 	 * Since the atomic state is swapped within atomic commit and not within
7304 	 * commit tail, this would lead to the new state (that hasn't been
7305 	 * committed yet) being accessed from within the handlers.
7306 	 *
7307 	 * TODO: Fix this so we can do this in commit tail and not have to block
7308 	 * in atomic check.
7309 	 */
7310 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7311 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7312 
7313 		if (old_crtc_state->active &&
7314 		    (!new_crtc_state->active ||
7315 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7316 			manage_dm_interrupts(adev, acrtc, false);
7317 	}
7318 	/*
7319 	 * TODO: Add a check here for SoCs that support a hardware cursor
7320 	 * plane, to unset legacy_cursor_update.
7321 	 */
7322 
7323 	return drm_atomic_helper_commit(dev, state, nonblock);
7324 
7325 	/* TODO: Handle EINTR and re-enable the IRQ */
7326 }
7327 
7328 /**
7329  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7330  * @state: The atomic state to commit
7331  *
7332  * This will tell DC to commit the constructed DC state from atomic_check,
7333  * programming the hardware. Any failure here implies a hardware failure, since
7334  * atomic check should have filtered anything non-kosher.
7335  */
7336 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7337 {
7338 	struct drm_device *dev = state->dev;
7339 	struct amdgpu_device *adev = dev->dev_private;
7340 	struct amdgpu_display_manager *dm = &adev->dm;
7341 	struct dm_atomic_state *dm_state;
7342 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7343 	uint32_t i, j;
7344 	struct drm_crtc *crtc;
7345 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7346 	unsigned long flags;
7347 	bool wait_for_vblank = true;
7348 	struct drm_connector *connector;
7349 	struct drm_connector_state *old_con_state, *new_con_state;
7350 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7351 	int crtc_disable_count = 0;
7352 
7353 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7354 
7355 	dm_state = dm_atomic_get_new_state(state);
7356 	if (dm_state && dm_state->context) {
7357 		dc_state = dm_state->context;
7358 	} else {
7359 		/* No state changes, retain current state. */
7360 		dc_state_temp = dc_create_state(dm->dc);
7361 		ASSERT(dc_state_temp);
7362 		dc_state = dc_state_temp;
7363 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7364 	}
7365 
7366 	/* update changed items */
7367 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7368 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7369 
7370 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7371 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7372 
7373 		DRM_DEBUG_DRIVER(
7374 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7375 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7376 			"connectors_changed:%d\n",
7377 			acrtc->crtc_id,
7378 			new_crtc_state->enable,
7379 			new_crtc_state->active,
7380 			new_crtc_state->planes_changed,
7381 			new_crtc_state->mode_changed,
7382 			new_crtc_state->active_changed,
7383 			new_crtc_state->connectors_changed);
7384 
7385 		/* Copy all transient state flags into dc state */
7386 		if (dm_new_crtc_state->stream) {
7387 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7388 							    dm_new_crtc_state->stream);
7389 		}
7390 
7391 		/* Handle the headless hotplug case, updating new_state and
7392 		 * aconnector as needed.
7393 		 */
7394 
7395 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7396 
7397 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7398 
7399 			if (!dm_new_crtc_state->stream) {
7400 				/*
7401 				 * This could happen because of issues with
7402 				 * userspace notification delivery.
7403 				 * In this case userspace tries to set a mode
7404 				 * on a display which is in fact disconnected;
7405 				 * dc_sink is NULL on the aconnector here.
7406 				 * We expect a mode reset to come soon.
7407 				 *
7408 				 * This can also happen when an unplug occurs
7409 				 * during the resume sequence.
7410 				 *
7411 				 * In either case we want to pretend we still
7412 				 * have a sink to keep the pipe running so that
7413 				 * hw state stays consistent with the sw state.
7414 				 */
7415 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7416 						__func__, acrtc->base.base.id);
7417 				continue;
7418 			}
7419 
7420 			if (dm_old_crtc_state->stream)
7421 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7422 
7423 			pm_runtime_get_noresume(dev->dev);
7424 
7425 			acrtc->enabled = true;
7426 			acrtc->hw_mode = new_crtc_state->mode;
7427 			crtc->hwmode = new_crtc_state->mode;
7428 		} else if (modereset_required(new_crtc_state)) {
7429 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7430 			/* i.e. reset mode */
7431 			if (dm_old_crtc_state->stream) {
7432 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7433 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7434 
7435 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7436 			}
7437 		}
7438 	} /* for_each_crtc_in_state() */
7439 
7440 	if (dc_state) {
7441 		dm_enable_per_frame_crtc_master_sync(dc_state);
7442 		mutex_lock(&dm->dc_lock);
7443 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7444 		mutex_unlock(&dm->dc_lock);
7445 	}
7446 
7447 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7448 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7449 
7450 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7451 
7452 		if (dm_new_crtc_state->stream != NULL) {
7453 			const struct dc_stream_status *status =
7454 					dc_stream_get_status(dm_new_crtc_state->stream);
7455 
7456 			if (!status)
7457 				status = dc_stream_get_status_from_state(dc_state,
7458 									 dm_new_crtc_state->stream);
7459 
7460 			if (!status)
7461 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7462 			else
7463 				acrtc->otg_inst = status->primary_otg_inst;
7464 		}
7465 	}
7466 #ifdef CONFIG_DRM_AMD_DC_HDCP
7467 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7468 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7469 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7470 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7471 
7472 		new_crtc_state = NULL;
7473 
7474 		if (acrtc)
7475 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7476 
7477 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7478 
7479 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7480 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7481 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7482 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7483 			continue;
7484 		}
7485 
7486 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7487 			hdcp_update_display(
7488 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7489 				new_con_state->hdcp_content_type,
7490 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7491 													 : false);
7492 	}
7493 #endif
7494 
7495 	/* Handle connector state changes */
7496 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7497 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7498 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7499 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7500 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7501 		struct dc_stream_update stream_update;
7502 		struct dc_info_packet hdr_packet;
7503 		struct dc_stream_status *status = NULL;
7504 		bool abm_changed, hdr_changed, scaling_changed;
7505 
7506 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7507 		memset(&stream_update, 0, sizeof(stream_update));
7508 
7509 		if (acrtc) {
7510 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7511 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7512 		}
7513 
7514 		/* Skip any modesets/resets */
7515 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7516 			continue;
7517 
7518 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7519 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7520 
7521 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7522 							     dm_old_con_state);
7523 
7524 		abm_changed = dm_new_crtc_state->abm_level !=
7525 			      dm_old_crtc_state->abm_level;
7526 
7527 		hdr_changed =
7528 			is_hdr_metadata_different(old_con_state, new_con_state);
7529 
7530 		if (!scaling_changed && !abm_changed && !hdr_changed)
7531 			continue;
7532 
7533 		stream_update.stream = dm_new_crtc_state->stream;
7534 		if (scaling_changed) {
7535 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7536 					dm_new_con_state, dm_new_crtc_state->stream);
7537 
7538 			stream_update.src = dm_new_crtc_state->stream->src;
7539 			stream_update.dst = dm_new_crtc_state->stream->dst;
7540 		}
7541 
7542 		if (abm_changed) {
7543 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7544 
7545 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7546 		}
7547 
7548 		if (hdr_changed) {
7549 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7550 			stream_update.hdr_static_metadata = &hdr_packet;
7551 		}
7552 
7553 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7554 		WARN_ON(!status);
7555 		WARN_ON(!status->plane_count);
7556 
7557 		/*
7558 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7559 		 * Here we create an empty update on each plane.
7560 		 * To fix this, DC should permit updating only stream properties.
7561 		 */
7562 		for (j = 0; j < status->plane_count; j++)
7563 			dummy_updates[j].surface = status->plane_states[0];
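		/*
		 * Note: each dummy update references the first plane state;
		 * these appear to serve only to make the update list non-empty.
		 */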
7564 
7565 
7566 		mutex_lock(&dm->dc_lock);
7567 		dc_commit_updates_for_stream(dm->dc,
7568 						     dummy_updates,
7569 						     status->plane_count,
7570 						     dm_new_crtc_state->stream,
7571 						     &stream_update,
7572 						     dc_state);
7573 		mutex_unlock(&dm->dc_lock);
7574 	}
7575 
7576 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7577 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7578 				      new_crtc_state, i) {
7579 		if (old_crtc_state->active && !new_crtc_state->active)
7580 			crtc_disable_count++;
7581 
7582 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7583 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7584 
7585 		/* Update freesync active state. */
7586 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7587 
7588 		/* Handle vrr on->off / off->on transitions */
7589 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7590 						dm_new_crtc_state);
7591 	}
7592 
7593 	/*
7594 	 * Enable interrupts for CRTCs that are newly enabled or went through
7595 	 * a modeset. This is intentionally deferred until after the front-end
7596 	 * state has been modified, so that the OTG is on and the IRQ handlers
7597 	 * don't access stale or invalid state.
7598 	 */
7599 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7600 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7601 
7602 		if (new_crtc_state->active &&
7603 		    (!old_crtc_state->active ||
7604 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7605 			manage_dm_interrupts(adev, acrtc, true);
7606 #ifdef CONFIG_DEBUG_FS
7607 			/*
7608 			 * The frontend may have changed, so reapply the CRC
7609 			 * capture settings for the stream.
7610 			 */
7611 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7612 
7613 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7614 				amdgpu_dm_crtc_configure_crc_source(
7615 					crtc, dm_new_crtc_state,
7616 					dm_new_crtc_state->crc_src);
7617 			}
7618 #endif
7619 		}
7620 	}
7621 
7622 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7623 		if (new_crtc_state->async_flip)
7624 			wait_for_vblank = false;
7625 
7626 	/* update planes when needed per crtc*/
7627 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7628 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7629 
7630 		if (dm_new_crtc_state->stream)
7631 			amdgpu_dm_commit_planes(state, dc_state, dev,
7632 						dm, crtc, wait_for_vblank);
7633 	}
7634 
7635 	/* Update audio instances for each connector. */
7636 	amdgpu_dm_commit_audio(dev, state);
7637 
7638 	/*
7639 	 * Send a vblank event for every CRTC event not handled in the flip
7640 	 * path, and mark it consumed for drm_atomic_helper_commit_hw_done().
7641 	 */
7642 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7643 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7644 
7645 		if (new_crtc_state->event)
7646 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7647 
7648 		new_crtc_state->event = NULL;
7649 	}
7650 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7651 
7652 	/* Signal HW programming completion */
7653 	drm_atomic_helper_commit_hw_done(state);
7654 
7655 	if (wait_for_vblank)
7656 		drm_atomic_helper_wait_for_flip_done(dev, state);
7657 
7658 	drm_atomic_helper_cleanup_planes(dev, state);
7659 
7660 	/*
7661 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7662 	 * so we can put the GPU into runtime suspend if we're not driving any
7663 	 * displays anymore
7664 	 */
7665 	for (i = 0; i < crtc_disable_count; i++)
7666 		pm_runtime_put_autosuspend(dev->dev);
7667 	pm_runtime_mark_last_busy(dev->dev);
7668 
7669 	if (dc_state_temp)
7670 		dc_release_state(dc_state_temp);
7671 }
7672 
7673 
7674 static int dm_force_atomic_commit(struct drm_connector *connector)
7675 {
7676 	int ret = 0;
7677 	struct drm_device *ddev = connector->dev;
7678 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7679 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7680 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7681 	struct drm_connector_state *conn_state;
7682 	struct drm_crtc_state *crtc_state;
7683 	struct drm_plane_state *plane_state;
7684 
7685 	if (!state)
7686 		return -ENOMEM;
7687 
7688 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
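	/*
	 * Reusing the device's acquire context assumes the caller already
	 * holds the modeset locks (e.g. taken via drm_modeset_lock_all()).
	 */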
7689 
7690 	/* Construct an atomic state to restore previous display setting */
7691 
7692 	/*
7693 	 * Attach connectors to drm_atomic_state
7694 	 */
7695 	conn_state = drm_atomic_get_connector_state(state, connector);
7696 
7697 	ret = PTR_ERR_OR_ZERO(conn_state);
7698 	if (ret)
7699 		goto err;
7700 
7701 	/* Attach crtc to drm_atomic_state*/
7702 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7703 
7704 	ret = PTR_ERR_OR_ZERO(crtc_state);
7705 	if (ret)
7706 		goto err;
7707 
7708 	/* force a restore */
7709 	crtc_state->mode_changed = true;
7710 
7711 	/* Attach plane to drm_atomic_state */
7712 	plane_state = drm_atomic_get_plane_state(state, plane);
7713 
7714 	ret = PTR_ERR_OR_ZERO(plane_state);
7715 	if (ret)
7716 		goto err;
7717 
7718 
7719 	/* Call commit internally with the state we just constructed */
7720 	ret = drm_atomic_commit(state);
7721 	if (!ret)
7722 		return 0;
7723 
7724 err:
7725 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7726 	drm_atomic_state_put(state);
7727 
7728 	return ret;
7729 }
7730 
7731 /*
7732  * This function handles all cases when set mode does not come upon hotplug.
7733  * This includes when a display is unplugged then plugged back into the
7734  * same port and when running without usermode desktop manager support.
7735  */
7736 void dm_restore_drm_connector_state(struct drm_device *dev,
7737 				    struct drm_connector *connector)
7738 {
7739 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7740 	struct amdgpu_crtc *disconnected_acrtc;
7741 	struct dm_crtc_state *acrtc_state;
7742 
7743 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7744 		return;
7745 
7746 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7747 	if (!disconnected_acrtc)
7748 		return;
7749 
7750 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7751 	if (!acrtc_state->stream)
7752 		return;
7753 
7754 	/*
7755 	 * If the previous sink is not released and is different from the
7756 	 * current one, we deduce that we are in a state where we cannot rely
7757 	 * on a usermode call to turn on the display, so we do it here.
7758 	 */
7759 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7760 		dm_force_atomic_commit(&aconnector->base);
7761 }
7762 
7763 /*
7764  * Grabs all modesetting locks to serialize against any blocking commits,
7765  * and waits for completion of all non-blocking commits.
7766  */
7767 static int do_aquire_global_lock(struct drm_device *dev,
7768 				 struct drm_atomic_state *state)
7769 {
7770 	struct drm_crtc *crtc;
7771 	struct drm_crtc_commit *commit;
7772 	long ret;
7773 
7774 	/*
7775 	 * Adding all modeset locks to the acquire_ctx will ensure that when
7776 	 * the framework releases it, the extra locks we are taking here will
7777 	 * get released too.
7778 	 */
7779 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7780 	if (ret)
7781 		return ret;
7782 
7783 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
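		/*
		 * Take a reference on the commit so it cannot be freed while
		 * we wait on its completions outside the spinlock.
		 */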
7784 		spin_lock(&crtc->commit_lock);
7785 		commit = list_first_entry_or_null(&crtc->commit_list,
7786 				struct drm_crtc_commit, commit_entry);
7787 		if (commit)
7788 			drm_crtc_commit_get(commit);
7789 		spin_unlock(&crtc->commit_lock);
7790 
7791 		if (!commit)
7792 			continue;
7793 
7794 		/*
7795 		 * Make sure all pending HW programming completed and
7796 		 * page flips done
7797 		 */
7798 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7799 
7800 		if (ret > 0)
7801 			ret = wait_for_completion_interruptible_timeout(
7802 					&commit->flip_done, 10*HZ);
7803 
7804 		if (ret == 0)
7805 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7806 				  "timed out\n", crtc->base.id, crtc->name);
7807 
7808 		drm_crtc_commit_put(commit);
7809 	}
7810 
7811 	return ret < 0 ? ret : 0;
7812 }
7813 
7814 static void get_freesync_config_for_crtc(
7815 	struct dm_crtc_state *new_crtc_state,
7816 	struct dm_connector_state *new_con_state)
7817 {
7818 	struct mod_freesync_config config = {0};
7819 	struct amdgpu_dm_connector *aconnector =
7820 			to_amdgpu_dm_connector(new_con_state->base.connector);
7821 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7822 	int vrefresh = drm_mode_vrefresh(mode);
7823 
7824 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7825 					vrefresh >= aconnector->min_vfreq &&
7826 					vrefresh <= aconnector->max_vfreq;
7827 
7828 	if (new_crtc_state->vrr_supported) {
7829 		new_crtc_state->stream->ignore_msa_timing_param = true;
7830 		config.state = new_crtc_state->base.vrr_enabled ?
7831 				VRR_STATE_ACTIVE_VARIABLE :
7832 				VRR_STATE_INACTIVE;
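		/*
		 * The freesync module expects refresh rates in uHz, e.g. a
		 * 48 Hz minimum becomes 48,000,000 uHz.
		 */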
7833 		config.min_refresh_in_uhz =
7834 				aconnector->min_vfreq * 1000000;
7835 		config.max_refresh_in_uhz =
7836 				aconnector->max_vfreq * 1000000;
7837 		config.vsif_supported = true;
7838 		config.btr = true;
7839 	}
7840 
7841 	new_crtc_state->freesync_config = config;
7842 }
7843 
7844 static void reset_freesync_config_for_crtc(
7845 	struct dm_crtc_state *new_crtc_state)
7846 {
7847 	new_crtc_state->vrr_supported = false;
7848 
7849 	memset(&new_crtc_state->vrr_params, 0,
7850 	       sizeof(new_crtc_state->vrr_params));
7851 	memset(&new_crtc_state->vrr_infopacket, 0,
7852 	       sizeof(new_crtc_state->vrr_infopacket));
7853 }
7854 
7855 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7856 				struct drm_atomic_state *state,
7857 				struct drm_crtc *crtc,
7858 				struct drm_crtc_state *old_crtc_state,
7859 				struct drm_crtc_state *new_crtc_state,
7860 				bool enable,
7861 				bool *lock_and_validation_needed)
7862 {
7863 	struct dm_atomic_state *dm_state = NULL;
7864 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7865 	struct dc_stream_state *new_stream;
7866 	int ret = 0;
7867 
7868 	/*
7869 	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7870 	 * dc_validation_set, and update the changed items there.
7871 	 */
7872 	struct amdgpu_crtc *acrtc = NULL;
7873 	struct amdgpu_dm_connector *aconnector = NULL;
7874 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7875 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7876 
7877 	new_stream = NULL;
7878 
7879 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7880 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7881 	acrtc = to_amdgpu_crtc(crtc);
7882 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7883 
7884 	/* TODO This hack should go away */
7885 	if (aconnector && enable) {
7886 		/* Make sure fake sink is created in plug-in scenario */
7887 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7888 							    &aconnector->base);
7889 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7890 							    &aconnector->base);
7891 
7892 		if (IS_ERR(drm_new_conn_state)) {
7893 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7894 			goto fail;
7895 		}
7896 
7897 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7898 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7899 
7900 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7901 			goto skip_modeset;
7902 
7903 		new_stream = create_validate_stream_for_sink(aconnector,
7904 							     &new_crtc_state->mode,
7905 							     dm_new_conn_state,
7906 							     dm_old_crtc_state->stream);
7907 
7908 		/*
7909 		 * We can have no stream on ACTION_SET if a display
7910 		 * was disconnected during S3; in this case it is not an
7911 		 * error, the OS will be updated after detection, and
7912 		 * will do the right thing on the next atomic commit.
7913 		 */
7914 
7915 		if (!new_stream) {
7916 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7917 					__func__, acrtc->base.base.id);
7918 			ret = -ENOMEM;
7919 			goto fail;
7920 		}
7921 
7922 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7923 
7924 		ret = fill_hdr_info_packet(drm_new_conn_state,
7925 					   &new_stream->hdr_static_metadata);
7926 		if (ret)
7927 			goto fail;
7928 
7929 		/*
7930 		 * If we already removed the old stream from the context
7931 		 * (and set the new stream to NULL) then we can't reuse
7932 		 * the old stream even if the stream and scaling are unchanged.
7933 		 * We'll hit the BUG_ON and black screen.
7934 		 *
7935 		 * TODO: Refactor this function to allow this check to work
7936 		 * in all conditions.
7937 		 */
7938 		if (dm_new_crtc_state->stream &&
7939 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7940 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7941 			new_crtc_state->mode_changed = false;
7942 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
7943 					 new_crtc_state->mode_changed);
7944 		}
7945 	}
7946 
7947 	/* mode_changed flag may get updated above, need to check again */
7948 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7949 		goto skip_modeset;
7950 
7951 	DRM_DEBUG_DRIVER(
7952 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7953 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7954 		"connectors_changed:%d\n",
7955 		acrtc->crtc_id,
7956 		new_crtc_state->enable,
7957 		new_crtc_state->active,
7958 		new_crtc_state->planes_changed,
7959 		new_crtc_state->mode_changed,
7960 		new_crtc_state->active_changed,
7961 		new_crtc_state->connectors_changed);
7962 
7963 	/* Remove stream for any changed/disabled CRTC */
7964 	if (!enable) {
7965 
7966 		if (!dm_old_crtc_state->stream)
7967 			goto skip_modeset;
7968 
7969 		ret = dm_atomic_get_state(state, &dm_state);
7970 		if (ret)
7971 			goto fail;
7972 
7973 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7974 				crtc->base.id);
7975 
7976 		/* i.e. reset mode */
7977 		if (dc_remove_stream_from_ctx(
7978 				dm->dc,
7979 				dm_state->context,
7980 				dm_old_crtc_state->stream) != DC_OK) {
7981 			ret = -EINVAL;
7982 			goto fail;
7983 		}
7984 
7985 		dc_stream_release(dm_old_crtc_state->stream);
7986 		dm_new_crtc_state->stream = NULL;
7987 
7988 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7989 
7990 		*lock_and_validation_needed = true;
7991 
7992 	} else {/* Add stream for any updated/enabled CRTC */
7993 		/*
7994 		 * Quick fix to prevent a NULL pointer dereference on new_stream
7995 		 * when added MST connectors are not found in the existing
7996 		 * crtc_state in chained mode. TODO: dig out the root cause.
7997 		 */
7998 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7999 			goto skip_modeset;
8000 
8001 		if (modereset_required(new_crtc_state))
8002 			goto skip_modeset;
8003 
8004 		if (modeset_required(new_crtc_state, new_stream,
8005 				     dm_old_crtc_state->stream)) {
8006 
8007 			WARN_ON(dm_new_crtc_state->stream);
8008 
8009 			ret = dm_atomic_get_state(state, &dm_state);
8010 			if (ret)
8011 				goto fail;
8012 
8013 			dm_new_crtc_state->stream = new_stream;
8014 
8015 			dc_stream_retain(new_stream);
8016 
8017 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8018 						crtc->base.id);
8019 
8020 			if (dc_add_stream_to_ctx(
8021 					dm->dc,
8022 					dm_state->context,
8023 					dm_new_crtc_state->stream) != DC_OK) {
8024 				ret = -EINVAL;
8025 				goto fail;
8026 			}
8027 
8028 			*lock_and_validation_needed = true;
8029 		}
8030 	}
8031 
8032 skip_modeset:
8033 	/* Release extra reference */
8034 	if (new_stream)
8035 		dc_stream_release(new_stream);
8036 
8037 	/*
8038 	 * We want to do dc stream updates that do not require a
8039 	 * full modeset below.
8040 	 */
8041 	if (!(enable && aconnector && new_crtc_state->enable &&
8042 	      new_crtc_state->active))
8043 		return 0;
8044 	/*
8045 	 * Given above conditions, the dc state cannot be NULL because:
8046 	 * 1. We're in the process of enabling CRTCs (just been added
8047 	 *    to the dc context, or already is on the context)
8048 	 * 2. Has a valid connector attached, and
8049 	 * 3. Is currently active and enabled.
8050 	 * => The dc stream state currently exists.
8051 	 */
8052 	BUG_ON(dm_new_crtc_state->stream == NULL);
8053 
8054 	/* Scaling or underscan settings */
8055 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8056 		update_stream_scaling_settings(
8057 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8058 
8059 	/* ABM settings */
8060 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8061 
8062 	/*
8063 	 * Color management settings. We also update color properties
8064 	 * when a modeset is needed, to ensure it gets reprogrammed.
8065 	 */
8066 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8067 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8068 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8069 		if (ret)
8070 			goto fail;
8071 	}
8072 
8073 	/* Update Freesync settings. */
8074 	get_freesync_config_for_crtc(dm_new_crtc_state,
8075 				     dm_new_conn_state);
8076 
8077 	return ret;
8078 
8079 fail:
8080 	if (new_stream)
8081 		dc_stream_release(new_stream);
8082 	return ret;
8083 }
8084 
8085 static bool should_reset_plane(struct drm_atomic_state *state,
8086 			       struct drm_plane *plane,
8087 			       struct drm_plane_state *old_plane_state,
8088 			       struct drm_plane_state *new_plane_state)
8089 {
8090 	struct drm_plane *other;
8091 	struct drm_plane_state *old_other_state, *new_other_state;
8092 	struct drm_crtc_state *new_crtc_state;
8093 	int i;
8094 
8095 	/*
8096 	 * TODO: Remove this hack once the checks below are sufficient
8097 	 * to determine when we need to reset all the planes on
8098 	 * the stream.
8099 	 */
8100 	if (state->allow_modeset)
8101 		return true;
8102 
8103 	/* Exit early if we know that we're adding or removing the plane. */
8104 	if (old_plane_state->crtc != new_plane_state->crtc)
8105 		return true;
8106 
8107 	/* old crtc == new_crtc == NULL, plane not in context. */
8108 	if (!new_plane_state->crtc)
8109 		return false;
8110 
8111 	new_crtc_state =
8112 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8113 
8114 	if (!new_crtc_state)
8115 		return true;
8116 
8117 	/* CRTC Degamma changes currently require us to recreate planes. */
8118 	if (new_crtc_state->color_mgmt_changed)
8119 		return true;
8120 
8121 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8122 		return true;
8123 
8124 	/*
8125 	 * If there are any new primary or overlay planes being added or
8126 	 * removed then the z-order can potentially change. To ensure
8127 	 * correct z-order and pipe acquisition the current DC architecture
8128 	 * requires us to remove and recreate all existing planes.
8129 	 *
8130 	 * TODO: Come up with a more elegant solution for this.
8131 	 */
8132 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8133 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8134 			continue;
8135 
8136 		if (old_other_state->crtc != new_plane_state->crtc &&
8137 		    new_other_state->crtc != new_plane_state->crtc)
8138 			continue;
8139 
8140 		if (old_other_state->crtc != new_other_state->crtc)
8141 			return true;
8142 
8143 		/* TODO: Remove this once we can handle fast format changes. */
8144 		if (old_other_state->fb && new_other_state->fb &&
8145 		    old_other_state->fb->format != new_other_state->fb->format)
8146 			return true;
8147 	}
8148 
8149 	return false;
8150 }
8151 
8152 static int dm_update_plane_state(struct dc *dc,
8153 				 struct drm_atomic_state *state,
8154 				 struct drm_plane *plane,
8155 				 struct drm_plane_state *old_plane_state,
8156 				 struct drm_plane_state *new_plane_state,
8157 				 bool enable,
8158 				 bool *lock_and_validation_needed)
8159 {
8160 
8161 	struct dm_atomic_state *dm_state = NULL;
8162 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8163 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8164 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8165 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8166 	struct amdgpu_crtc *new_acrtc;
8167 	bool needs_reset;
8168 	int ret = 0;
8169 
8170 
8171 	new_plane_crtc = new_plane_state->crtc;
8172 	old_plane_crtc = old_plane_state->crtc;
8173 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8174 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8175 
8176 	/* TODO: Implement a better atomic check for the cursor plane */
8177 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8178 		if (!enable || !new_plane_crtc ||
8179 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8180 			return 0;
8181 
8182 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8183 
8184 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8185 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8186 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8187 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8188 			return -EINVAL;
8189 		}
8190 
8191 		return 0;
8192 	}
8193 
8194 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8195 					 new_plane_state);
8196 
8197 	/* Remove any changed/removed planes */
8198 	if (!enable) {
8199 		if (!needs_reset)
8200 			return 0;
8201 
8202 		if (!old_plane_crtc)
8203 			return 0;
8204 
8205 		old_crtc_state = drm_atomic_get_old_crtc_state(
8206 				state, old_plane_crtc);
8207 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8208 
8209 		if (!dm_old_crtc_state->stream)
8210 			return 0;
8211 
8212 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8213 				plane->base.id, old_plane_crtc->base.id);
8214 
8215 		ret = dm_atomic_get_state(state, &dm_state);
8216 		if (ret)
8217 			return ret;
8218 
8219 		if (!dc_remove_plane_from_context(
8220 				dc,
8221 				dm_old_crtc_state->stream,
8222 				dm_old_plane_state->dc_state,
8223 				dm_state->context)) {
8224 
8225 			ret = -EINVAL;
8226 			return ret;
8227 		}
8228 
8229 
8230 		dc_plane_state_release(dm_old_plane_state->dc_state);
8231 		dm_new_plane_state->dc_state = NULL;
8232 
8233 		*lock_and_validation_needed = true;
8234 
8235 	} else { /* Add new planes */
8236 		struct dc_plane_state *dc_new_plane_state;
8237 
8238 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8239 			return 0;
8240 
8241 		if (!new_plane_crtc)
8242 			return 0;
8243 
8244 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8245 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8246 
8247 		if (!dm_new_crtc_state->stream)
8248 			return 0;
8249 
8250 		if (!needs_reset)
8251 			return 0;
8252 
8253 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8254 		if (ret)
8255 			return ret;
8256 
8257 		WARN_ON(dm_new_plane_state->dc_state);
8258 
8259 		dc_new_plane_state = dc_create_plane_state(dc);
8260 		if (!dc_new_plane_state)
8261 			return -ENOMEM;
8262 
8263 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8264 				plane->base.id, new_plane_crtc->base.id);
8265 
8266 		ret = fill_dc_plane_attributes(
8267 			new_plane_crtc->dev->dev_private,
8268 			dc_new_plane_state,
8269 			new_plane_state,
8270 			new_crtc_state);
8271 		if (ret) {
8272 			dc_plane_state_release(dc_new_plane_state);
8273 			return ret;
8274 		}
8275 
8276 		ret = dm_atomic_get_state(state, &dm_state);
8277 		if (ret) {
8278 			dc_plane_state_release(dc_new_plane_state);
8279 			return ret;
8280 		}
8281 
8282 		/*
8283 		 * Any atomic check errors that occur after this will
8284 		 * not need a release. The plane state will be attached
8285 		 * to the stream, and therefore part of the atomic
8286 		 * state. It'll be released when the atomic state is
8287 		 * cleaned.
8288 		 */
8289 		if (!dc_add_plane_to_context(
8290 				dc,
8291 				dm_new_crtc_state->stream,
8292 				dc_new_plane_state,
8293 				dm_state->context)) {
8294 
8295 			dc_plane_state_release(dc_new_plane_state);
8296 			return -EINVAL;
8297 		}
8298 
8299 		dm_new_plane_state->dc_state = dc_new_plane_state;
8300 
8301 		/* Tell DC to do a full surface update every time there
8302 		 * is a plane change. Inefficient, but works for now.
8303 		 */
8304 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8305 
8306 		*lock_and_validation_needed = true;
8307 	}
8308 
8309 
8310 	return ret;
8311 }
8312 
8313 static int
8314 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8315 				    struct drm_atomic_state *state,
8316 				    enum surface_update_type *out_type)
8317 {
8318 	struct dc *dc = dm->dc;
8319 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8320 	int i, j, num_plane, ret = 0;
8321 	struct drm_plane_state *old_plane_state, *new_plane_state;
8322 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8323 	struct drm_crtc *new_plane_crtc;
8324 	struct drm_plane *plane;
8325 
8326 	struct drm_crtc *crtc;
8327 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8328 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8329 	struct dc_stream_status *status = NULL;
8330 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8331 	struct surface_info_bundle {
8332 		struct dc_surface_update surface_updates[MAX_SURFACES];
8333 		struct dc_plane_info plane_infos[MAX_SURFACES];
8334 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8335 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8336 		struct dc_stream_update stream_update;
8337 	} *bundle;
8338 
8339 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8340 
8341 	if (!bundle) {
8342 		DRM_ERROR("Failed to allocate update bundle\n");
8343 		/* Set type to FULL to avoid crashing in DC */
8344 		update_type = UPDATE_TYPE_FULL;
8345 		goto cleanup;
8346 	}
8347 
8348 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8349 
8350 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8351 
8352 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8353 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8354 		num_plane = 0;
8355 
8356 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8357 			update_type = UPDATE_TYPE_FULL;
8358 			goto cleanup;
8359 		}
8360 
8361 		if (!new_dm_crtc_state->stream)
8362 			continue;
8363 
8364 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8365 			const struct amdgpu_framebuffer *amdgpu_fb =
8366 				to_amdgpu_framebuffer(new_plane_state->fb);
8367 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8368 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8369 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8370 			uint64_t tiling_flags;
8371 			bool tmz_surface = false;
8372 
8373 			new_plane_crtc = new_plane_state->crtc;
8374 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8375 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8376 
8377 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8378 				continue;
8379 
8380 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8381 				update_type = UPDATE_TYPE_FULL;
8382 				goto cleanup;
8383 			}
8384 
8385 			if (crtc != new_plane_crtc)
8386 				continue;
8387 
8388 			bundle->surface_updates[num_plane].surface =
8389 					new_dm_plane_state->dc_state;
8390 
8391 			if (new_crtc_state->mode_changed) {
8392 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8393 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8394 			}
8395 
8396 			if (new_crtc_state->color_mgmt_changed) {
8397 				bundle->surface_updates[num_plane].gamma =
8398 						new_dm_plane_state->dc_state->gamma_correction;
8399 				bundle->surface_updates[num_plane].in_transfer_func =
8400 						new_dm_plane_state->dc_state->in_transfer_func;
8401 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8402 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8403 				bundle->stream_update.gamut_remap =
8404 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8405 				bundle->stream_update.output_csc_transform =
8406 						&new_dm_crtc_state->stream->csc_color_matrix;
8407 				bundle->stream_update.out_transfer_func =
8408 						new_dm_crtc_state->stream->out_transfer_func;
8409 			}
8410 
8411 			ret = fill_dc_scaling_info(new_plane_state,
8412 						   scaling_info);
8413 			if (ret)
8414 				goto cleanup;
8415 
8416 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8417 
8418 			if (amdgpu_fb) {
8419 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8420 				if (ret)
8421 					goto cleanup;
8422 
8423 				ret = fill_dc_plane_info_and_addr(
8424 					dm->adev, new_plane_state, tiling_flags,
8425 					plane_info,
8426 					&flip_addr->address, tmz_surface,
8427 					false);
8428 				if (ret)
8429 					goto cleanup;
8430 
8431 				bundle->surface_updates[num_plane].plane_info = plane_info;
8432 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8433 			}
8434 
8435 			num_plane++;
8436 		}
8437 
8438 		if (num_plane == 0)
8439 			continue;
8440 
8441 		ret = dm_atomic_get_state(state, &dm_state);
8442 		if (ret)
8443 			goto cleanup;
8444 
8445 		old_dm_state = dm_atomic_get_old_state(state);
8446 		if (!old_dm_state) {
8447 			ret = -EINVAL;
8448 			goto cleanup;
8449 		}
8450 
8451 		status = dc_stream_get_status_from_state(old_dm_state->context,
8452 							 new_dm_crtc_state->stream);
8453 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8454 		/*
8455 		 * TODO: DC modifies the surface during this call so we need
8456 		 * to lock here - find a way to do this without locking.
8457 		 */
8458 		mutex_lock(&dm->dc_lock);
8459 		update_type = dc_check_update_surfaces_for_stream(
8460 				dc,	bundle->surface_updates, num_plane,
8461 				&bundle->stream_update, status);
8462 		mutex_unlock(&dm->dc_lock);
8463 
8464 		if (update_type > UPDATE_TYPE_MED) {
8465 			update_type = UPDATE_TYPE_FULL;
8466 			goto cleanup;
8467 		}
8468 	}
8469 
8470 cleanup:
8471 	kfree(bundle);
8472 
8473 	*out_type = update_type;
8474 	return ret;
8475 }
8476 #if defined(CONFIG_DRM_AMD_DC_DCN)
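/*
 * If an MST connector in the new state drives @crtc, add every CRTC sharing
 * its MST topology to the atomic state so that DSC bandwidth can be
 * revalidated across the whole topology.
 */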
8477 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8478 {
8479 	struct drm_connector *connector;
8480 	struct drm_connector_state *conn_state;
8481 	struct amdgpu_dm_connector *aconnector = NULL;
8482 	int i;
8483 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8484 		if (conn_state->crtc != crtc)
8485 			continue;
8486 
8487 		aconnector = to_amdgpu_dm_connector(connector);
8488 		if (!aconnector->port || !aconnector->mst_port)
8489 			aconnector = NULL;
8490 		else
8491 			break;
8492 	}
8493 
8494 	if (!aconnector)
8495 		return 0;
8496 
8497 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8498 }
8499 #endif
8500 
8501 /**
8502  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8503  * @dev: The DRM device
8504  * @state: The atomic state to commit
8505  *
8506  * Validate that the given atomic state is programmable by DC into hardware.
8507  * This involves constructing a &struct dc_state reflecting the new hardware
8508  * state we wish to commit, then querying DC to see if it is programmable. It's
8509  * important not to modify the existing DC state. Otherwise, atomic_check
8510  * may unexpectedly commit hardware changes.
8511  *
8512  * When validating the DC state, it's important that the right locks are
8513  * acquired. For a full update, which removes/adds/updates streams on one
8514  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
8515  * that any such full-update commit will wait for completion of any outstanding
8516  * flip using DRM's synchronization events. See
8517  * dm_determine_update_type_for_commit().
8518  *
8519  * Note that DM adds the affected connectors for all CRTCs in state, even when
8520  * that might not seem necessary. This is because DC stream creation requires the
8521  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8522  * be possible but non-trivial - a possible TODO item.
8523  *
8524  * Return: 0 on success, or a negative error code if validation failed.
8525  */
8526 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8527 				  struct drm_atomic_state *state)
8528 {
8529 	struct amdgpu_device *adev = dev->dev_private;
8530 	struct dm_atomic_state *dm_state = NULL;
8531 	struct dc *dc = adev->dm.dc;
8532 	struct drm_connector *connector;
8533 	struct drm_connector_state *old_con_state, *new_con_state;
8534 	struct drm_crtc *crtc;
8535 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8536 	struct drm_plane *plane;
8537 	struct drm_plane_state *old_plane_state, *new_plane_state;
8538 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8539 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8540 	enum dc_status status;
8541 	int ret, i;
8542 
8543 	/*
8544 	 * This bool will be set for true for any modeset/reset
8545 	 * or plane update which implies non fast surface update.
8546 	 */
8547 	bool lock_and_validation_needed = false;
8548 
8549 	ret = drm_atomic_helper_check_modeset(dev, state);
8550 	if (ret)
8551 		goto fail;
8552 
8553 	/* Check connector changes */
8554 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8555 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8556 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8557 
8558 		/* Skip connectors that are disabled or part of modeset already. */
8559 		if (!old_con_state->crtc && !new_con_state->crtc)
8560 			continue;
8561 
8562 		if (!new_con_state->crtc)
8563 			continue;
8564 
8565 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8566 		if (IS_ERR(new_crtc_state)) {
8567 			ret = PTR_ERR(new_crtc_state);
8568 			goto fail;
8569 		}
8570 
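		/*
		 * A change in the ABM (Adaptive Backlight Management) level
		 * must be reprogrammed into the stream, so mark the CRTC's
		 * connectors as changed to make sure the commit picks it up.
		 */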
		if (dm_old_con_state->abm_level !=
		    dm_new_con_state->abm_level)
			new_crtc_state->connectors_changed = true;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->asic_type >= CHIP_NAVI10) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret)
					goto fail;
			}
		}
	}
#endif
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				goto fail;
			}
		}
	}

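	/*
	 * The next four loops stage the state for DC: first remove the
	 * planes that are being disabled or modified, then disable the
	 * CRTCs that require it, then enable CRTCs, and finally add the
	 * new/modified planes.
	 */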
	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Disable all crtcs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Enable all crtcs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/* TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need
	 * to take the global lock. Leaving it in to check if we broke any
	 * corner cases:
	 *	lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *	lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
			goto fail;

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			DC_LOG_WARNING("DC global validation failure: %s (%d)",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be success */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

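/*
 * Sinks that set the MSA_TIMING_PAR_IGNORED bit in DP_DOWN_STREAM_PORT_COUNT
 * (DPCD address 0x0007) tolerate the source varying its vertical timing,
 * which is the prerequisite for FreeSync/VRR over DP.
 */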
static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
		dm_helpers_dp_read_dpcd(
				NULL,
				amdgpu_dm_connector->dc_link,
				DP_DOWN_STREAM_PORT_COUNT,
				&dpcd_data,
				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync support to DP and
	 * eDP sinks.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {
			timing	= &edid->detailed_timings[i];
			data	= &timing->data.other_data;
			range	= &data->data.range;
			/*
			 * Check if monitor has continuous frequency mode
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check for flag range limits only. If flag == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF Secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

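			/*
			 * The EDID range descriptor encodes the maximum
			 * pixel clock in 10 MHz units, hence the * 10 to
			 * get MHz.
			 */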
			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

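		/*
		 * Only advertise FreeSync when the monitor reports a usable
		 * refresh range (more than 10 Hz between min and max).
		 */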
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
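	/*
	 * DP_PSR_SUPPORT (DPCD 0x070) reports the sink's PSR version; zero
	 * means the panel does not support PSR.
	 */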
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
					dpcd_data, sizeof(dpcd_data))) {
		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];

		if (dpcd_data[0] == 0) {
			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
			link->psr_settings.psr_feature_enabled = false;
		} else {
			link->psr_settings.psr_version = DC_PSR_VERSION_1;
			link->psr_settings.psr_feature_enabled = true;
		}

		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure psr link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;

	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	unsigned int vsync_rate_hz = 0;
	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR.  Start from a fail-safe default of
	 * two static frames.
	 */
	unsigned int num_frames_static = 2;

	DRM_DEBUG_DRIVER("Enabling psr...\n");

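	/*
	 * pix_clk_100hz is in units of 100 Hz, so multiplying by 100 gives
	 * the pixel clock in Hz; dividing by the total pixels per frame
	 * (v_total * h_total) yields the refresh rate in Hz.
	 */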
	vsync_rate_hz = div64_u64(div64_u64((
			stream->timing.pix_clk_100hz * 100),
			stream->timing.v_total),
			stream->timing.h_total);

	/*
	 * Calculate the number of frames such that at least 30 ms of time
	 * has passed, rounding up.
	 */
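	/*
	 * For example, a 60 Hz stream has a frame time of 16666 us, which
	 * gives num_frames_static = 30000 / 16666 + 1 = 2 frames.
	 */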
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}

	params.triggers.cursor_update = true;
	params.triggers.overlay_update = true;
	params.triggers.surface_update = true;
	params.num_frames = num_frames_static;

	dc_stream_set_static_screen_params(link->ctx->dc,
					   &stream, 1,
					   &params);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable psr f/w
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}