1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * initializes drm_device display related structures, based on the information
132  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
175 
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
184  * int disp_idx - [in] which CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191 	if (crtc >= adev->mode_info.num_crtc)
192 		return 0;
193 	else {
194 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
196 				acrtc->base.state);
197 
198 
199 		if (acrtc_state->stream == NULL) {
200 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
201 				  crtc);
202 			return 0;
203 		}
204 
205 		return dc_stream_get_vblank_counter(acrtc_state->stream);
206 	}
207 }
208 
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
221 		if (acrtc_state->stream ==  NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: ignored
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
325 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
326 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
339 	if (!e)
340 		WARN_ON(1);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
350 		/* Update to correct count and vblank timestamp if racing with
351 		 * vblank irq. This also updates to the correct vblank timestamp
352 		 * even in VRR mode, as scanout is past the front-porch atm.
353 		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
387 	/* Keep track of vblank of this flip for flip throttling. We use the
388 	 * cooked hw counter, as that one incremented at start of this vblank
389 	 * of pageflip completion, so last_flip_vblank is the forbidden count
390 	 * for queueing new pageflips if vsync + VRR is enabled.
391 	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
420 		/* Core vblank handling is done here after end of front-porch in
421 		 * vrr mode, as vblank timestamping will give valid results
422 		 * while now done after front-porch. This will also deliver
423 		 * page-flip completion events that have been queued to us
424 		 * if a pageflip happened inside front-porch.
425 		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
452  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/**
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/**
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
505 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
541 static int dm_early_init(void* handle);
542 
543 /* Allocate memory for FBC compressed data  */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
562 
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
573 		if (r)
574 			DRM_ERROR("DM: Failed to initialize FBC\n");
575 		else {
576 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
578 		}
579 
580 	}
581 
582 }
583 
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
705 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
773 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 	 * amdgpu_ucode_init_single_fw will load dmub firmware
775 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 	 * will be done by dm_dmub_hw_init
777 	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
792 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
865 	if(amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
932 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953 	if (!adev->dm.freesync_module) {
954 		DRM_ERROR(
955 		"amdgpu: failed to initialize freesync_module.\n");
956 	} else
957 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958 				adev->dm.freesync_module);
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR(
976 		"amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual used number of crtc */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
992 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
993 		DRM_ERROR(
994 		"amdgpu: failed to initialize sw for display support.\n");
995 		goto error;
996 	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013 	}
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028 	if (adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
1042 	 * TODO: pageflip, vlank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1058 
1059 	return;
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
1068 	switch(adev->asic_type) {
1069 	case CHIP_BONAIRE:
1070 	case CHIP_HAWAII:
1071 	case CHIP_KAVERI:
1072 	case CHIP_KABINI:
1073 	case CHIP_MULLINS:
1074 	case CHIP_TONGA:
1075 	case CHIP_FIJI:
1076 	case CHIP_CARRIZO:
1077 	case CHIP_STONEY:
1078 	case CHIP_POLARIS11:
1079 	case CHIP_POLARIS10:
1080 	case CHIP_POLARIS12:
1081 	case CHIP_VEGAM:
1082 	case CHIP_VEGA10:
1083 	case CHIP_VEGA12:
1084 	case CHIP_VEGA20:
1085 	case CHIP_NAVI10:
1086 	case CHIP_NAVI14:
1087 	case CHIP_RENOIR:
1088 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1089 	case CHIP_SIENNA_CICHLID:
1090 	case CHIP_NAVY_FLOUNDER:
1091 #endif
1092 		return 0;
1093 	case CHIP_NAVI12:
1094 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1095 		break;
1096 	case CHIP_RAVEN:
1097 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1098 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1099 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1100 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1101 		else
1102 			return 0;
1103 		break;
1104 	default:
1105 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1106 		return -EINVAL;
1107 	}
1108 
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1111 		return 0;
1112 	}
1113 
1114 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1115 	if (r == -ENOENT) {
1116 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1117 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1118 		adev->dm.fw_dmcu = NULL;
1119 		return 0;
1120 	}
1121 	if (r) {
1122 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1123 			fw_name_dmcu);
1124 		return r;
1125 	}
1126 
1127 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1128 	if (r) {
1129 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1130 			fw_name_dmcu);
1131 		release_firmware(adev->dm.fw_dmcu);
1132 		adev->dm.fw_dmcu = NULL;
1133 		return r;
1134 	}
1135 
1136 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1137 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1138 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1139 	adev->firmware.fw_size +=
1140 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1141 
1142 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1144 	adev->firmware.fw_size +=
1145 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1146 
1147 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1148 
1149 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1150 
1151 	return 0;
1152 }
1153 
1154 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1155 {
1156 	struct amdgpu_device *adev = ctx;
1157 
1158 	return dm_read_reg(adev->dm.dc->ctx, address);
1159 }
1160 
1161 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1162 				     uint32_t value)
1163 {
1164 	struct amdgpu_device *adev = ctx;
1165 
1166 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1167 }
1168 
1169 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1170 {
1171 	struct dmub_srv_create_params create_params;
1172 	struct dmub_srv_region_params region_params;
1173 	struct dmub_srv_region_info region_info;
1174 	struct dmub_srv_fb_params fb_params;
1175 	struct dmub_srv_fb_info *fb_info;
1176 	struct dmub_srv *dmub_srv;
1177 	const struct dmcub_firmware_header_v1_0 *hdr;
1178 	const char *fw_name_dmub;
1179 	enum dmub_asic dmub_asic;
1180 	enum dmub_status status;
1181 	int r;
1182 
1183 	switch (adev->asic_type) {
1184 	case CHIP_RENOIR:
1185 		dmub_asic = DMUB_ASIC_DCN21;
1186 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1187 		break;
1188 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1189 	case CHIP_SIENNA_CICHLID:
1190 		dmub_asic = DMUB_ASIC_DCN30;
1191 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1192 		break;
1193 	case CHIP_NAVY_FLOUNDER:
1194 		dmub_asic = DMUB_ASIC_DCN30;
1195 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1196 		break;
1197 #endif
1198 
1199 	default:
1200 		/* ASIC doesn't support DMUB. */
1201 		return 0;
1202 	}
1203 
1204 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1205 	if (r) {
1206 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1207 		return 0;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1211 	if (r) {
1212 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1213 		return 0;
1214 	}
1215 
1216 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1217 
1218 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1219 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1220 			AMDGPU_UCODE_ID_DMCUB;
1221 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1222 			adev->dm.dmub_fw;
1223 		adev->firmware.fw_size +=
1224 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1225 
1226 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1227 			 adev->dm.dmcub_fw_version);
1228 	}
1229 
1230 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1231 
1232 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1233 	dmub_srv = adev->dm.dmub_srv;
1234 
1235 	if (!dmub_srv) {
1236 		DRM_ERROR("Failed to allocate DMUB service!\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(&create_params, 0, sizeof(create_params));
1241 	create_params.user_ctx = adev;
1242 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1243 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1244 	create_params.asic = dmub_asic;
1245 
1246 	/* Create the DMUB service. */
1247 	status = dmub_srv_create(dmub_srv, &create_params);
1248 	if (status != DMUB_STATUS_OK) {
1249 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Calculate the size of all the regions for the DMUB service. */
1254 	memset(&region_params, 0, sizeof(region_params));
1255 
1256 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1257 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1258 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1259 	region_params.vbios_size = adev->bios_size;
1260 	region_params.fw_bss_data = region_params.bss_data_size ?
1261 		adev->dm.dmub_fw->data +
1262 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1263 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1264 	region_params.fw_inst_const =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		PSP_HEADER_BYTES;
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	release_firmware(adev->dm.dmub_fw);
1338 	adev->dm.dmub_fw = NULL;
1339 
1340 	release_firmware(adev->dm.fw_dmcu);
1341 	adev->dm.fw_dmcu = NULL;
1342 
1343 	return 0;
1344 }
1345 
1346 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1347 {
1348 	struct amdgpu_dm_connector *aconnector;
1349 	struct drm_connector *connector;
1350 	struct drm_connector_list_iter iter;
1351 	int ret = 0;
1352 
1353 	drm_connector_list_iter_begin(dev, &iter);
1354 	drm_for_each_connector_iter(connector, &iter) {
1355 		aconnector = to_amdgpu_dm_connector(connector);
1356 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1357 		    aconnector->mst_mgr.aux) {
1358 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1359 					 aconnector,
1360 					 aconnector->base.base.id);
1361 
1362 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1363 			if (ret < 0) {
1364 				DRM_ERROR("DM_MST: Failed to start MST\n");
1365 				aconnector->dc_link->type =
1366 					dc_connection_single;
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	drm_connector_list_iter_end(&iter);
1372 
1373 	return ret;
1374 }
1375 
1376 static int dm_late_init(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	struct dmcu_iram_parameters params;
1381 	unsigned int linear_lut[16];
1382 	int i;
1383 	struct dmcu *dmcu = NULL;
1384 	bool ret = true;
1385 
1386 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1387 		return detect_mst_link_for_all_connectors(adev->ddev);
1388 
1389 	dmcu = adev->dm.dc->res_pool->dmcu;
1390 
1391 	for (i = 0; i < 16; i++)
1392 		linear_lut[i] = 0xFFFF * i / 15;
1393 
1394 	params.set = 0;
1395 	params.backlight_ramping_start = 0xCCCC;
1396 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1397 	params.backlight_lut_array_size = 16;
1398 	params.backlight_lut_array = linear_lut;
1399 
1400 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1401 	 * 0xFFFF x 0.01 = 0x28F
1402 	 */
1403 	params.min_abm_backlight = 0x28F;
1404 
1405 	/* In the case where abm is implemented on dmcub,
1406 	 * dmcu object will be null.
1407 	 * ABM 2.4 and up are implemented on dmcub.
1408 	 */
1409 	if (dmcu)
1410 		ret = dmcu_load_iram(dmcu, params);
1411 	else if (adev->dm.dc->ctx->dmub_srv)
1412 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1413 
1414 	if (!ret)
1415 		return -EINVAL;
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
1462 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1463 	 * on window driver dc implementation.
1464 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1465 	 * should be passed to smu during boot up and resume from s3.
1466 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1467 	 * dcn20_resource_construct
1468 	 * then call pplib functions below to pass the settings to smu:
1469 	 * smu_set_watermarks_for_clock_ranges
1470 	 * smu_set_watermarks_table
1471 	 * navi10_set_watermarks_table
1472 	 * smu_write_watermarks_table
1473 	 *
1474 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 	 * dc has implemented different flow for window driver:
1476 	 * dc_hardware_init / dc_set_power_state
1477 	 * dcn10_init_hw
1478 	 * notify_wm_ranges
1479 	 * set_wm_ranges
1480 	 * -- Linux
1481 	 * smu_set_watermarks_for_clock_ranges
1482 	 * renoir_set_watermarks_table
1483 	 * smu_write_watermarks_table
1484 	 *
1485 	 * For Linux,
1486 	 * dc_hardware_init -> amdgpu_dm_init
1487 	 * dc_set_power_state --> dm_resume
1488 	 *
1489 	 * therefore, this function apply to navi10/12/14 but not Renoir
1490 	 * *
1491 	 */
1492 	switch(adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	ret = smu_write_watermarks_table(smu);
1502 	if (ret) {
1503 		DRM_ERROR("Failed to update WMTABLE!\n");
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 /**
1511  * dm_hw_init() - Initialize DC device
1512  * @handle: The base driver device containing the amdgpu_dm device.
1513  *
1514  * Initialize the &struct amdgpu_display_manager device. This involves calling
1515  * the initializers of each DM component, then populating the struct with them.
1516  *
1517  * Although the function implies hardware initialization, both hardware and
1518  * software are initialized here. Splitting them out to their relevant init
1519  * hooks is a future TODO item.
1520  *
1521  * Some notable things that are initialized here:
1522  *
1523  * - Display Core, both software and hardware
1524  * - DC modules that we need (freesync and color management)
1525  * - DRM software states
1526  * - Interrupt sources and handlers
1527  * - Vblank support
1528  * - Debug FS entries, if enabled
1529  */
1530 static int dm_hw_init(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 	/* Create DAL display manager */
1534 	amdgpu_dm_init(adev);
1535 	amdgpu_dm_hpd_init(adev);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * dm_hw_fini() - Teardown DC device
1542  * @handle: The base driver device containing the amdgpu_dm device.
1543  *
1544  * Teardown components within &struct amdgpu_display_manager that require
1545  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1546  * were loaded. Also flush IRQ workqueues and disable them.
1547  */
1548 static int dm_hw_fini(void *handle)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	amdgpu_dm_hpd_fini(adev);
1553 
1554 	amdgpu_dm_irq_fini(adev);
1555 	amdgpu_dm_fini(adev);
1556 	return 0;
1557 }
1558 
1559 
1560 static int dm_enable_vblank(struct drm_crtc *crtc);
1561 static void dm_disable_vblank(struct drm_crtc *crtc);
1562 
1563 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1564 				 struct dc_state *state, bool enable)
1565 {
1566 	enum dc_irq_source irq_source;
1567 	struct amdgpu_crtc *acrtc;
1568 	int rc = -EBUSY;
1569 	int i = 0;
1570 
1571 	for (i = 0; i < state->stream_count; i++) {
1572 		acrtc = get_crtc_by_otg_inst(
1573 				adev, state->stream_status[i].primary_otg_inst);
1574 
1575 		if (acrtc && state->stream_status[i].plane_count != 0) {
1576 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1577 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1578 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1579 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1580 			if (rc)
1581 				DRM_WARN("Failed to %s pflip interrupts\n",
1582 					 enable ? "enable" : "disable");
1583 
1584 			if (enable) {
1585 				rc = dm_enable_vblank(&acrtc->base);
1586 				if (rc)
1587 					DRM_WARN("Failed to enable vblank interrupts\n");
1588 			} else {
1589 				dm_disable_vblank(&acrtc->base);
1590 			}
1591 
1592 		}
1593 	}
1594 
1595 }
1596 
1597 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1598 {
1599 	struct dc_state *context = NULL;
1600 	enum dc_status res = DC_ERROR_UNEXPECTED;
1601 	int i;
1602 	struct dc_stream_state *del_streams[MAX_PIPES];
1603 	int del_streams_count = 0;
1604 
1605 	memset(del_streams, 0, sizeof(del_streams));
1606 
1607 	context = dc_create_state(dc);
1608 	if (context == NULL)
1609 		goto context_alloc_fail;
1610 
1611 	dc_resource_state_copy_construct_current(dc, context);
1612 
1613 	/* First remove from context all streams */
1614 	for (i = 0; i < context->stream_count; i++) {
1615 		struct dc_stream_state *stream = context->streams[i];
1616 
1617 		del_streams[del_streams_count++] = stream;
1618 	}
1619 
1620 	/* Remove all planes for removed streams and then remove the streams */
1621 	for (i = 0; i < del_streams_count; i++) {
1622 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1623 			res = DC_FAIL_DETACH_SURFACES;
1624 			goto fail;
1625 		}
1626 
1627 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1628 		if (res != DC_OK)
1629 			goto fail;
1630 	}
1631 
1632 
1633 	res = dc_validate_global_state(dc, context, false);
1634 
1635 	if (res != DC_OK) {
1636 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1637 		goto fail;
1638 	}
1639 
1640 	res = dc_commit_state(dc, context);
1641 
1642 fail:
1643 	dc_release_state(context);
1644 
1645 context_alloc_fail:
1646 	return res;
1647 }
1648 
1649 static int dm_suspend(void *handle)
1650 {
1651 	struct amdgpu_device *adev = handle;
1652 	struct amdgpu_display_manager *dm = &adev->dm;
1653 	int ret = 0;
1654 
1655 	if (adev->in_gpu_reset) {
1656 		mutex_lock(&dm->dc_lock);
1657 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1658 
1659 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1660 
1661 		amdgpu_dm_commit_zero_streams(dm->dc);
1662 
1663 		amdgpu_dm_irq_suspend(adev);
1664 
1665 		return ret;
1666 	}
1667 
1668 	WARN_ON(adev->dm.cached_state);
1669 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1670 
1671 	s3_handle_mst(adev->ddev, true);
1672 
1673 	amdgpu_dm_irq_suspend(adev);
1674 
1675 
1676 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct amdgpu_dm_connector *
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1683 					     struct drm_crtc *crtc)
1684 {
1685 	uint32_t i;
1686 	struct drm_connector_state *new_con_state;
1687 	struct drm_connector *connector;
1688 	struct drm_crtc *crtc_from_state;
1689 
1690 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1691 		crtc_from_state = new_con_state->crtc;
1692 
1693 		if (crtc_from_state == crtc)
1694 			return to_amdgpu_dm_connector(connector);
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
1700 static void emulated_link_detect(struct dc_link *link)
1701 {
1702 	struct dc_sink_init_data sink_init_data = { 0 };
1703 	struct display_sink_capability sink_caps = { 0 };
1704 	enum dc_edid_status edid_status;
1705 	struct dc_context *dc_ctx = link->ctx;
1706 	struct dc_sink *sink = NULL;
1707 	struct dc_sink *prev_sink = NULL;
1708 
1709 	link->type = dc_connection_none;
1710 	prev_sink = link->local_sink;
1711 
1712 	if (prev_sink != NULL)
1713 		dc_sink_retain(prev_sink);
1714 
1715 	switch (link->connector_signal) {
1716 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_LVDS: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_EDP: {
1741 		sink_caps.transaction_type =
1742 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1743 		sink_caps.signal = SIGNAL_TYPE_EDP;
1744 		break;
1745 	}
1746 
1747 	case SIGNAL_TYPE_DISPLAY_PORT: {
1748 		sink_caps.transaction_type =
1749 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1750 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1751 		break;
1752 	}
1753 
1754 	default:
1755 		DC_ERROR("Invalid connector type! signal:%d\n",
1756 			link->connector_signal);
1757 		return;
1758 	}
1759 
1760 	sink_init_data.link = link;
1761 	sink_init_data.sink_signal = sink_caps.signal;
1762 
1763 	sink = dc_sink_create(&sink_init_data);
1764 	if (!sink) {
1765 		DC_ERROR("Failed to create sink!\n");
1766 		return;
1767 	}
1768 
1769 	/* dc_sink_create returns a new reference */
1770 	link->local_sink = sink;
1771 
1772 	edid_status = dm_helpers_read_local_edid(
1773 			link->ctx,
1774 			link,
1775 			sink);
1776 
1777 	if (edid_status != EDID_OK)
1778 		DC_ERROR("Failed to read EDID");
1779 
1780 }
1781 
1782 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1783 				     struct amdgpu_display_manager *dm)
1784 {
1785 	struct {
1786 		struct dc_surface_update surface_updates[MAX_SURFACES];
1787 		struct dc_plane_info plane_infos[MAX_SURFACES];
1788 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1789 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1790 		struct dc_stream_update stream_update;
1791 	} * bundle;
1792 	int k, m;
1793 
1794 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1795 
1796 	if (!bundle) {
1797 		dm_error("Failed to allocate update bundle\n");
1798 		goto cleanup;
1799 	}
1800 
1801 	for (k = 0; k < dc_state->stream_count; k++) {
1802 		bundle->stream_update.stream = dc_state->streams[k];
1803 
1804 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1805 			bundle->surface_updates[m].surface =
1806 				dc_state->stream_status->plane_states[m];
1807 			bundle->surface_updates[m].surface->force_full_update =
1808 				true;
1809 		}
1810 		dc_commit_updates_for_stream(
1811 			dm->dc, bundle->surface_updates,
1812 			dc_state->stream_status->plane_count,
1813 			dc_state->streams[k], &bundle->stream_update, dc_state);
1814 	}
1815 
1816 cleanup:
1817 	kfree(bundle);
1818 
1819 	return;
1820 }
1821 
1822 static int dm_resume(void *handle)
1823 {
1824 	struct amdgpu_device *adev = handle;
1825 	struct drm_device *ddev = adev->ddev;
1826 	struct amdgpu_display_manager *dm = &adev->dm;
1827 	struct amdgpu_dm_connector *aconnector;
1828 	struct drm_connector *connector;
1829 	struct drm_connector_list_iter iter;
1830 	struct drm_crtc *crtc;
1831 	struct drm_crtc_state *new_crtc_state;
1832 	struct dm_crtc_state *dm_new_crtc_state;
1833 	struct drm_plane *plane;
1834 	struct drm_plane_state *new_plane_state;
1835 	struct dm_plane_state *dm_new_plane_state;
1836 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1837 	enum dc_connection_type new_connection_type = dc_connection_none;
1838 	struct dc_state *dc_state;
1839 	int i, r, j;
1840 
1841 	if (adev->in_gpu_reset) {
1842 		dc_state = dm->cached_dc_state;
1843 
1844 		r = dm_dmub_hw_init(adev);
1845 		if (r)
1846 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1847 
1848 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1849 		dc_resume(dm->dc);
1850 
1851 		amdgpu_dm_irq_resume_early(adev);
1852 
1853 		for (i = 0; i < dc_state->stream_count; i++) {
1854 			dc_state->streams[i]->mode_changed = true;
1855 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1856 				dc_state->stream_status->plane_states[j]->update_flags.raw
1857 					= 0xffffffff;
1858 			}
1859 		}
1860 
1861 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1862 
1863 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1864 
1865 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1866 
1867 		dc_release_state(dm->cached_dc_state);
1868 		dm->cached_dc_state = NULL;
1869 
1870 		amdgpu_dm_irq_resume_late(adev);
1871 
1872 		mutex_unlock(&dm->dc_lock);
1873 
1874 		return 0;
1875 	}
1876 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 	dc_release_state(dm_state->context);
1878 	dm_state->context = dc_create_state(dm->dc);
1879 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 	dc_resource_state_construct(dm->dc, dm_state->context);
1881 
1882 	/* Before powering on DC we need to re-initialize DMUB. */
1883 	r = dm_dmub_hw_init(adev);
1884 	if (r)
1885 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1886 
1887 	/* power on hardware */
1888 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1889 
1890 	/* program HPD filter */
1891 	dc_resume(dm->dc);
1892 
1893 	/*
1894 	 * early enable HPD Rx IRQ, should be done before set mode as short
1895 	 * pulse interrupts are used for MST
1896 	 */
1897 	amdgpu_dm_irq_resume_early(adev);
1898 
1899 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1900 	s3_handle_mst(ddev, false);
1901 
1902 	/* Do detection*/
1903 	drm_connector_list_iter_begin(ddev, &iter);
1904 	drm_for_each_connector_iter(connector, &iter) {
1905 		aconnector = to_amdgpu_dm_connector(connector);
1906 
1907 		/*
1908 		 * this is the case when traversing through already created
1909 		 * MST connectors, should be skipped
1910 		 */
1911 		if (aconnector->mst_port)
1912 			continue;
1913 
1914 		mutex_lock(&aconnector->hpd_lock);
1915 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1916 			DRM_ERROR("KMS: Failed to detect connector\n");
1917 
1918 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1919 			emulated_link_detect(aconnector->dc_link);
1920 		else
1921 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1922 
1923 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1924 			aconnector->fake_enable = false;
1925 
1926 		if (aconnector->dc_sink)
1927 			dc_sink_release(aconnector->dc_sink);
1928 		aconnector->dc_sink = NULL;
1929 		amdgpu_dm_update_connector_after_detect(aconnector);
1930 		mutex_unlock(&aconnector->hpd_lock);
1931 	}
1932 	drm_connector_list_iter_end(&iter);
1933 
1934 	/* Force mode set in atomic commit */
1935 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1936 		new_crtc_state->active_changed = true;
1937 
1938 	/*
1939 	 * atomic_check is expected to create the dc states. We need to release
1940 	 * them here, since they were duplicated as part of the suspend
1941 	 * procedure.
1942 	 */
1943 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1945 		if (dm_new_crtc_state->stream) {
1946 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1947 			dc_stream_release(dm_new_crtc_state->stream);
1948 			dm_new_crtc_state->stream = NULL;
1949 		}
1950 	}
1951 
1952 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1953 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1954 		if (dm_new_plane_state->dc_state) {
1955 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1956 			dc_plane_state_release(dm_new_plane_state->dc_state);
1957 			dm_new_plane_state->dc_state = NULL;
1958 		}
1959 	}
1960 
1961 	drm_atomic_helper_resume(ddev, dm->cached_state);
1962 
1963 	dm->cached_state = NULL;
1964 
1965 	amdgpu_dm_irq_resume_late(adev);
1966 
1967 	amdgpu_dm_smu_write_watermarks_table(adev);
1968 
1969 	return 0;
1970 }
1971 
1972 /**
1973  * DOC: DM Lifecycle
1974  *
1975  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1976  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1977  * the base driver's device list to be initialized and torn down accordingly.
1978  *
1979  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1980  */
1981 
1982 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1983 	.name = "dm",
1984 	.early_init = dm_early_init,
1985 	.late_init = dm_late_init,
1986 	.sw_init = dm_sw_init,
1987 	.sw_fini = dm_sw_fini,
1988 	.hw_init = dm_hw_init,
1989 	.hw_fini = dm_hw_fini,
1990 	.suspend = dm_suspend,
1991 	.resume = dm_resume,
1992 	.is_idle = dm_is_idle,
1993 	.wait_for_idle = dm_wait_for_idle,
1994 	.check_soft_reset = dm_check_soft_reset,
1995 	.soft_reset = dm_soft_reset,
1996 	.set_clockgating_state = dm_set_clockgating_state,
1997 	.set_powergating_state = dm_set_powergating_state,
1998 };
1999 
2000 const struct amdgpu_ip_block_version dm_ip_block =
2001 {
2002 	.type = AMD_IP_BLOCK_TYPE_DCE,
2003 	.major = 1,
2004 	.minor = 0,
2005 	.rev = 0,
2006 	.funcs = &amdgpu_dm_funcs,
2007 };
2008 
2009 
2010 /**
2011  * DOC: atomic
2012  *
2013  * *WIP*
2014  */
2015 
2016 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2017 	.fb_create = amdgpu_display_user_framebuffer_create,
2018 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2019 	.atomic_check = amdgpu_dm_atomic_check,
2020 	.atomic_commit = amdgpu_dm_atomic_commit,
2021 };
2022 
2023 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2024 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2025 };
2026 
2027 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2028 {
2029 	u32 max_cll, min_cll, max, min, q, r;
2030 	struct amdgpu_dm_backlight_caps *caps;
2031 	struct amdgpu_display_manager *dm;
2032 	struct drm_connector *conn_base;
2033 	struct amdgpu_device *adev;
2034 	struct dc_link *link = NULL;
2035 	static const u8 pre_computed_values[] = {
2036 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2037 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2038 
2039 	if (!aconnector || !aconnector->dc_link)
2040 		return;
2041 
2042 	link = aconnector->dc_link;
2043 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2044 		return;
2045 
2046 	conn_base = &aconnector->base;
2047 	adev = conn_base->dev->dev_private;
2048 	dm = &adev->dm;
2049 	caps = &dm->backlight_caps;
2050 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2051 	caps->aux_support = false;
2052 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2053 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2054 
2055 	if (caps->ext_caps->bits.oled == 1 ||
2056 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2057 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2058 		caps->aux_support = true;
2059 
2060 	/* From the specification (CTA-861-G), for calculating the maximum
2061 	 * luminance we need to use:
2062 	 *	Luminance = 50*2**(CV/32)
2063 	 * Where CV is a one-byte value.
2064 	 * For calculating this expression we may need float point precision;
2065 	 * to avoid this complexity level, we take advantage that CV is divided
2066 	 * by a constant. From the Euclids division algorithm, we know that CV
2067 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2068 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2069 	 * need to pre-compute the value of r/32. For pre-computing the values
2070 	 * We just used the following Ruby line:
2071 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2072 	 * The results of the above expressions can be verified at
2073 	 * pre_computed_values.
2074 	 */
2075 	q = max_cll >> 5;
2076 	r = max_cll % 32;
2077 	max = (1 << q) * pre_computed_values[r];
2078 
2079 	// min luminance: maxLum * (CV/255)^2 / 100
2080 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2081 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2082 
2083 	caps->aux_max_input_signal = max;
2084 	caps->aux_min_input_signal = min;
2085 }
2086 
2087 void amdgpu_dm_update_connector_after_detect(
2088 		struct amdgpu_dm_connector *aconnector)
2089 {
2090 	struct drm_connector *connector = &aconnector->base;
2091 	struct drm_device *dev = connector->dev;
2092 	struct dc_sink *sink;
2093 
2094 	/* MST handled by drm_mst framework */
2095 	if (aconnector->mst_mgr.mst_state == true)
2096 		return;
2097 
2098 
2099 	sink = aconnector->dc_link->local_sink;
2100 	if (sink)
2101 		dc_sink_retain(sink);
2102 
2103 	/*
2104 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 	 * the connector sink is set to either fake or physical sink depends on link status.
2106 	 * Skip if already done during boot.
2107 	 */
2108 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 			&& aconnector->dc_em_sink) {
2110 
2111 		/*
2112 		 * For S3 resume with headless use eml_sink to fake stream
2113 		 * because on resume connector->sink is set to NULL
2114 		 */
2115 		mutex_lock(&dev->mode_config.mutex);
2116 
2117 		if (sink) {
2118 			if (aconnector->dc_sink) {
2119 				amdgpu_dm_update_freesync_caps(connector, NULL);
2120 				/*
2121 				 * retain and release below are used to
2122 				 * bump up refcount for sink because the link doesn't point
2123 				 * to it anymore after disconnect, so on next crtc to connector
2124 				 * reshuffle by UMD we will get into unwanted dc_sink release
2125 				 */
2126 				dc_sink_release(aconnector->dc_sink);
2127 			}
2128 			aconnector->dc_sink = sink;
2129 			dc_sink_retain(aconnector->dc_sink);
2130 			amdgpu_dm_update_freesync_caps(connector,
2131 					aconnector->edid);
2132 		} else {
2133 			amdgpu_dm_update_freesync_caps(connector, NULL);
2134 			if (!aconnector->dc_sink) {
2135 				aconnector->dc_sink = aconnector->dc_em_sink;
2136 				dc_sink_retain(aconnector->dc_sink);
2137 			}
2138 		}
2139 
2140 		mutex_unlock(&dev->mode_config.mutex);
2141 
2142 		if (sink)
2143 			dc_sink_release(sink);
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * TODO: temporary guard to look for proper fix
2149 	 * if this sink is MST sink, we should not do anything
2150 	 */
2151 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 		dc_sink_release(sink);
2153 		return;
2154 	}
2155 
2156 	if (aconnector->dc_sink == sink) {
2157 		/*
2158 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2159 		 * Do nothing!!
2160 		 */
2161 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 				aconnector->connector_id);
2163 		if (sink)
2164 			dc_sink_release(sink);
2165 		return;
2166 	}
2167 
2168 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 		aconnector->connector_id, aconnector->dc_sink, sink);
2170 
2171 	mutex_lock(&dev->mode_config.mutex);
2172 
2173 	/*
2174 	 * 1. Update status of the drm connector
2175 	 * 2. Send an event and let userspace tell us what to do
2176 	 */
2177 	if (sink) {
2178 		/*
2179 		 * TODO: check if we still need the S3 mode update workaround.
2180 		 * If yes, put it here.
2181 		 */
2182 		if (aconnector->dc_sink)
2183 			amdgpu_dm_update_freesync_caps(connector, NULL);
2184 
2185 		aconnector->dc_sink = sink;
2186 		dc_sink_retain(aconnector->dc_sink);
2187 		if (sink->dc_edid.length == 0) {
2188 			aconnector->edid = NULL;
2189 			if (aconnector->dc_link->aux_mode) {
2190 				drm_dp_cec_unset_edid(
2191 					&aconnector->dm_dp_aux.aux);
2192 			}
2193 		} else {
2194 			aconnector->edid =
2195 				(struct edid *)sink->dc_edid.raw_edid;
2196 
2197 			drm_connector_update_edid_property(connector,
2198 							   aconnector->edid);
2199 			drm_add_edid_modes(connector, aconnector->edid);
2200 
2201 			if (aconnector->dc_link->aux_mode)
2202 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2203 						    aconnector->edid);
2204 		}
2205 
2206 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2207 		update_connector_ext_caps(aconnector);
2208 	} else {
2209 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2210 		amdgpu_dm_update_freesync_caps(connector, NULL);
2211 		drm_connector_update_edid_property(connector, NULL);
2212 		aconnector->num_modes = 0;
2213 		dc_sink_release(aconnector->dc_sink);
2214 		aconnector->dc_sink = NULL;
2215 		aconnector->edid = NULL;
2216 #ifdef CONFIG_DRM_AMD_DC_HDCP
2217 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2218 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2219 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2220 #endif
2221 	}
2222 
2223 	mutex_unlock(&dev->mode_config.mutex);
2224 
2225 	if (sink)
2226 		dc_sink_release(sink);
2227 }
2228 
2229 static void handle_hpd_irq(void *param)
2230 {
2231 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2232 	struct drm_connector *connector = &aconnector->base;
2233 	struct drm_device *dev = connector->dev;
2234 	enum dc_connection_type new_connection_type = dc_connection_none;
2235 #ifdef CONFIG_DRM_AMD_DC_HDCP
2236 	struct amdgpu_device *adev = dev->dev_private;
2237 #endif
2238 
2239 	/*
2240 	 * In case of failure or MST no need to update connector status or notify the OS
2241 	 * since (for MST case) MST does this in its own context.
2242 	 */
2243 	mutex_lock(&aconnector->hpd_lock);
2244 
2245 #ifdef CONFIG_DRM_AMD_DC_HDCP
2246 	if (adev->dm.hdcp_workqueue)
2247 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2248 #endif
2249 	if (aconnector->fake_enable)
2250 		aconnector->fake_enable = false;
2251 
2252 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2253 		DRM_ERROR("KMS: Failed to detect connector\n");
2254 
2255 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2256 		emulated_link_detect(aconnector->dc_link);
2257 
2258 
2259 		drm_modeset_lock_all(dev);
2260 		dm_restore_drm_connector_state(dev, connector);
2261 		drm_modeset_unlock_all(dev);
2262 
2263 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2264 			drm_kms_helper_hotplug_event(dev);
2265 
2266 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2267 		amdgpu_dm_update_connector_after_detect(aconnector);
2268 
2269 
2270 		drm_modeset_lock_all(dev);
2271 		dm_restore_drm_connector_state(dev, connector);
2272 		drm_modeset_unlock_all(dev);
2273 
2274 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2275 			drm_kms_helper_hotplug_event(dev);
2276 	}
2277 	mutex_unlock(&aconnector->hpd_lock);
2278 
2279 }
2280 
2281 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2282 {
2283 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2284 	uint8_t dret;
2285 	bool new_irq_handled = false;
2286 	int dpcd_addr;
2287 	int dpcd_bytes_to_read;
2288 
2289 	const int max_process_count = 30;
2290 	int process_count = 0;
2291 
2292 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2293 
2294 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2295 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2296 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2297 		dpcd_addr = DP_SINK_COUNT;
2298 	} else {
2299 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2300 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2301 		dpcd_addr = DP_SINK_COUNT_ESI;
2302 	}
2303 
2304 	dret = drm_dp_dpcd_read(
2305 		&aconnector->dm_dp_aux.aux,
2306 		dpcd_addr,
2307 		esi,
2308 		dpcd_bytes_to_read);
2309 
2310 	while (dret == dpcd_bytes_to_read &&
2311 		process_count < max_process_count) {
2312 		uint8_t retry;
2313 		dret = 0;
2314 
2315 		process_count++;
2316 
2317 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2318 		/* handle HPD short pulse irq */
2319 		if (aconnector->mst_mgr.mst_state)
2320 			drm_dp_mst_hpd_irq(
2321 				&aconnector->mst_mgr,
2322 				esi,
2323 				&new_irq_handled);
2324 
2325 		if (new_irq_handled) {
2326 			/* ACK at DPCD to notify down stream */
2327 			const int ack_dpcd_bytes_to_write =
2328 				dpcd_bytes_to_read - 1;
2329 
2330 			for (retry = 0; retry < 3; retry++) {
2331 				uint8_t wret;
2332 
2333 				wret = drm_dp_dpcd_write(
2334 					&aconnector->dm_dp_aux.aux,
2335 					dpcd_addr + 1,
2336 					&esi[1],
2337 					ack_dpcd_bytes_to_write);
2338 				if (wret == ack_dpcd_bytes_to_write)
2339 					break;
2340 			}
2341 
2342 			/* check if there is new irq to be handled */
2343 			dret = drm_dp_dpcd_read(
2344 				&aconnector->dm_dp_aux.aux,
2345 				dpcd_addr,
2346 				esi,
2347 				dpcd_bytes_to_read);
2348 
2349 			new_irq_handled = false;
2350 		} else {
2351 			break;
2352 		}
2353 	}
2354 
2355 	if (process_count == max_process_count)
2356 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2357 }
2358 
2359 static void handle_hpd_rx_irq(void *param)
2360 {
2361 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2362 	struct drm_connector *connector = &aconnector->base;
2363 	struct drm_device *dev = connector->dev;
2364 	struct dc_link *dc_link = aconnector->dc_link;
2365 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2366 	enum dc_connection_type new_connection_type = dc_connection_none;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 	union hpd_irq_data hpd_irq_data;
2369 	struct amdgpu_device *adev = dev->dev_private;
2370 
2371 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2372 #endif
2373 
2374 	/*
2375 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2376 	 * conflict, after implement i2c helper, this mutex should be
2377 	 * retired.
2378 	 */
2379 	if (dc_link->type != dc_connection_mst_branch)
2380 		mutex_lock(&aconnector->hpd_lock);
2381 
2382 
2383 #ifdef CONFIG_DRM_AMD_DC_HDCP
2384 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2385 #else
2386 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2387 #endif
2388 			!is_mst_root_connector) {
2389 		/* Downstream Port status changed. */
2390 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2391 			DRM_ERROR("KMS: Failed to detect connector\n");
2392 
2393 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2394 			emulated_link_detect(dc_link);
2395 
2396 			if (aconnector->fake_enable)
2397 				aconnector->fake_enable = false;
2398 
2399 			amdgpu_dm_update_connector_after_detect(aconnector);
2400 
2401 
2402 			drm_modeset_lock_all(dev);
2403 			dm_restore_drm_connector_state(dev, connector);
2404 			drm_modeset_unlock_all(dev);
2405 
2406 			drm_kms_helper_hotplug_event(dev);
2407 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2408 
2409 			if (aconnector->fake_enable)
2410 				aconnector->fake_enable = false;
2411 
2412 			amdgpu_dm_update_connector_after_detect(aconnector);
2413 
2414 
2415 			drm_modeset_lock_all(dev);
2416 			dm_restore_drm_connector_state(dev, connector);
2417 			drm_modeset_unlock_all(dev);
2418 
2419 			drm_kms_helper_hotplug_event(dev);
2420 		}
2421 	}
2422 #ifdef CONFIG_DRM_AMD_DC_HDCP
2423 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2424 		if (adev->dm.hdcp_workqueue)
2425 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2426 	}
2427 #endif
2428 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2429 	    (dc_link->type == dc_connection_mst_branch))
2430 		dm_handle_hpd_rx_irq(aconnector);
2431 
2432 	if (dc_link->type != dc_connection_mst_branch) {
2433 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2434 		mutex_unlock(&aconnector->hpd_lock);
2435 	}
2436 }
2437 
2438 static void register_hpd_handlers(struct amdgpu_device *adev)
2439 {
2440 	struct drm_device *dev = adev->ddev;
2441 	struct drm_connector *connector;
2442 	struct amdgpu_dm_connector *aconnector;
2443 	const struct dc_link *dc_link;
2444 	struct dc_interrupt_params int_params = {0};
2445 
2446 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2447 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2448 
2449 	list_for_each_entry(connector,
2450 			&dev->mode_config.connector_list, head)	{
2451 
2452 		aconnector = to_amdgpu_dm_connector(connector);
2453 		dc_link = aconnector->dc_link;
2454 
2455 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2456 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2457 			int_params.irq_source = dc_link->irq_source_hpd;
2458 
2459 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2460 					handle_hpd_irq,
2461 					(void *) aconnector);
2462 		}
2463 
2464 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2465 
2466 			/* Also register for DP short pulse (hpd_rx). */
2467 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2468 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2469 
2470 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2471 					handle_hpd_rx_irq,
2472 					(void *) aconnector);
2473 		}
2474 	}
2475 }
2476 
2477 /* Register IRQ sources and initialize IRQ callbacks */
2478 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2479 {
2480 	struct dc *dc = adev->dm.dc;
2481 	struct common_irq_params *c_irq_params;
2482 	struct dc_interrupt_params int_params = {0};
2483 	int r;
2484 	int i;
2485 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2486 
2487 	if (adev->asic_type >= CHIP_VEGA10)
2488 		client_id = SOC15_IH_CLIENTID_DCE;
2489 
2490 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2492 
2493 	/*
2494 	 * Actions of amdgpu_irq_add_id():
2495 	 * 1. Register a set() function with base driver.
2496 	 *    Base driver will call set() function to enable/disable an
2497 	 *    interrupt in DC hardware.
2498 	 * 2. Register amdgpu_dm_irq_handler().
2499 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2500 	 *    coming from DC hardware.
2501 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2502 	 *    for acknowledging and handling. */
2503 
2504 	/* Use VBLANK interrupt */
2505 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2506 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2507 		if (r) {
2508 			DRM_ERROR("Failed to add crtc irq id!\n");
2509 			return r;
2510 		}
2511 
2512 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2513 		int_params.irq_source =
2514 			dc_interrupt_to_irq_source(dc, i, 0);
2515 
2516 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2517 
2518 		c_irq_params->adev = adev;
2519 		c_irq_params->irq_src = int_params.irq_source;
2520 
2521 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2522 				dm_crtc_high_irq, c_irq_params);
2523 	}
2524 
2525 	/* Use VUPDATE interrupt */
2526 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2527 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2528 		if (r) {
2529 			DRM_ERROR("Failed to add vupdate irq id!\n");
2530 			return r;
2531 		}
2532 
2533 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2534 		int_params.irq_source =
2535 			dc_interrupt_to_irq_source(dc, i, 0);
2536 
2537 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2538 
2539 		c_irq_params->adev = adev;
2540 		c_irq_params->irq_src = int_params.irq_source;
2541 
2542 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2543 				dm_vupdate_high_irq, c_irq_params);
2544 	}
2545 
2546 	/* Use GRPH_PFLIP interrupt */
2547 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2548 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2549 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2550 		if (r) {
2551 			DRM_ERROR("Failed to add page flip irq id!\n");
2552 			return r;
2553 		}
2554 
2555 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2556 		int_params.irq_source =
2557 			dc_interrupt_to_irq_source(dc, i, 0);
2558 
2559 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2560 
2561 		c_irq_params->adev = adev;
2562 		c_irq_params->irq_src = int_params.irq_source;
2563 
2564 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2565 				dm_pflip_high_irq, c_irq_params);
2566 
2567 	}
2568 
2569 	/* HPD */
2570 	r = amdgpu_irq_add_id(adev, client_id,
2571 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2572 	if (r) {
2573 		DRM_ERROR("Failed to add hpd irq id!\n");
2574 		return r;
2575 	}
2576 
2577 	register_hpd_handlers(adev);
2578 
2579 	return 0;
2580 }
2581 
2582 #if defined(CONFIG_DRM_AMD_DC_DCN)
2583 /* Register IRQ sources and initialize IRQ callbacks */
2584 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2585 {
2586 	struct dc *dc = adev->dm.dc;
2587 	struct common_irq_params *c_irq_params;
2588 	struct dc_interrupt_params int_params = {0};
2589 	int r;
2590 	int i;
2591 
2592 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2594 
2595 	/*
2596 	 * Actions of amdgpu_irq_add_id():
2597 	 * 1. Register a set() function with base driver.
2598 	 *    Base driver will call set() function to enable/disable an
2599 	 *    interrupt in DC hardware.
2600 	 * 2. Register amdgpu_dm_irq_handler().
2601 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2602 	 *    coming from DC hardware.
2603 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2604 	 *    for acknowledging and handling.
2605 	 */
2606 
2607 	/* Use VSTARTUP interrupt */
2608 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2609 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2610 			i++) {
2611 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2612 
2613 		if (r) {
2614 			DRM_ERROR("Failed to add crtc irq id!\n");
2615 			return r;
2616 		}
2617 
2618 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2619 		int_params.irq_source =
2620 			dc_interrupt_to_irq_source(dc, i, 0);
2621 
2622 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2623 
2624 		c_irq_params->adev = adev;
2625 		c_irq_params->irq_src = int_params.irq_source;
2626 
2627 		amdgpu_dm_irq_register_interrupt(
2628 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2629 	}
2630 
2631 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2632 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2633 	 * to trigger at end of each vblank, regardless of state of the lock,
2634 	 * matching DCE behaviour.
2635 	 */
2636 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2637 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2638 	     i++) {
2639 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2640 
2641 		if (r) {
2642 			DRM_ERROR("Failed to add vupdate irq id!\n");
2643 			return r;
2644 		}
2645 
2646 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2647 		int_params.irq_source =
2648 			dc_interrupt_to_irq_source(dc, i, 0);
2649 
2650 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2651 
2652 		c_irq_params->adev = adev;
2653 		c_irq_params->irq_src = int_params.irq_source;
2654 
2655 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2656 				dm_vupdate_high_irq, c_irq_params);
2657 	}
2658 
2659 	/* Use GRPH_PFLIP interrupt */
2660 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2661 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2662 			i++) {
2663 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2664 		if (r) {
2665 			DRM_ERROR("Failed to add page flip irq id!\n");
2666 			return r;
2667 		}
2668 
2669 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2670 		int_params.irq_source =
2671 			dc_interrupt_to_irq_source(dc, i, 0);
2672 
2673 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2674 
2675 		c_irq_params->adev = adev;
2676 		c_irq_params->irq_src = int_params.irq_source;
2677 
2678 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2679 				dm_pflip_high_irq, c_irq_params);
2680 
2681 	}
2682 
2683 	/* HPD */
2684 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2685 			&adev->hpd_irq);
2686 	if (r) {
2687 		DRM_ERROR("Failed to add hpd irq id!\n");
2688 		return r;
2689 	}
2690 
2691 	register_hpd_handlers(adev);
2692 
2693 	return 0;
2694 }
2695 #endif
2696 
2697 /*
2698  * Acquires the lock for the atomic state object and returns
2699  * the new atomic state.
2700  *
2701  * This should only be called during atomic check.
2702  */
2703 static int dm_atomic_get_state(struct drm_atomic_state *state,
2704 			       struct dm_atomic_state **dm_state)
2705 {
2706 	struct drm_device *dev = state->dev;
2707 	struct amdgpu_device *adev = dev->dev_private;
2708 	struct amdgpu_display_manager *dm = &adev->dm;
2709 	struct drm_private_state *priv_state;
2710 
2711 	if (*dm_state)
2712 		return 0;
2713 
2714 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2715 	if (IS_ERR(priv_state))
2716 		return PTR_ERR(priv_state);
2717 
2718 	*dm_state = to_dm_atomic_state(priv_state);
2719 
2720 	return 0;
2721 }
2722 
2723 static struct dm_atomic_state *
2724 dm_atomic_get_new_state(struct drm_atomic_state *state)
2725 {
2726 	struct drm_device *dev = state->dev;
2727 	struct amdgpu_device *adev = dev->dev_private;
2728 	struct amdgpu_display_manager *dm = &adev->dm;
2729 	struct drm_private_obj *obj;
2730 	struct drm_private_state *new_obj_state;
2731 	int i;
2732 
2733 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2734 		if (obj->funcs == dm->atomic_obj.funcs)
2735 			return to_dm_atomic_state(new_obj_state);
2736 	}
2737 
2738 	return NULL;
2739 }
2740 
2741 static struct dm_atomic_state *
2742 dm_atomic_get_old_state(struct drm_atomic_state *state)
2743 {
2744 	struct drm_device *dev = state->dev;
2745 	struct amdgpu_device *adev = dev->dev_private;
2746 	struct amdgpu_display_manager *dm = &adev->dm;
2747 	struct drm_private_obj *obj;
2748 	struct drm_private_state *old_obj_state;
2749 	int i;
2750 
2751 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2752 		if (obj->funcs == dm->atomic_obj.funcs)
2753 			return to_dm_atomic_state(old_obj_state);
2754 	}
2755 
2756 	return NULL;
2757 }
2758 
2759 static struct drm_private_state *
2760 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2761 {
2762 	struct dm_atomic_state *old_state, *new_state;
2763 
2764 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2765 	if (!new_state)
2766 		return NULL;
2767 
2768 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2769 
2770 	old_state = to_dm_atomic_state(obj->state);
2771 
2772 	if (old_state && old_state->context)
2773 		new_state->context = dc_copy_state(old_state->context);
2774 
2775 	if (!new_state->context) {
2776 		kfree(new_state);
2777 		return NULL;
2778 	}
2779 
2780 	return &new_state->base;
2781 }
2782 
2783 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2784 				    struct drm_private_state *state)
2785 {
2786 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2787 
2788 	if (dm_state && dm_state->context)
2789 		dc_release_state(dm_state->context);
2790 
2791 	kfree(dm_state);
2792 }
2793 
2794 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2795 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2796 	.atomic_destroy_state = dm_atomic_destroy_state,
2797 };
2798 
2799 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2800 {
2801 	struct dm_atomic_state *state;
2802 	int r;
2803 
2804 	adev->mode_info.mode_config_initialized = true;
2805 
2806 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2807 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2808 
2809 	adev->ddev->mode_config.max_width = 16384;
2810 	adev->ddev->mode_config.max_height = 16384;
2811 
2812 	adev->ddev->mode_config.preferred_depth = 24;
2813 	adev->ddev->mode_config.prefer_shadow = 1;
2814 	/* indicates support for immediate flip */
2815 	adev->ddev->mode_config.async_page_flip = true;
2816 
2817 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2818 
2819 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2820 	if (!state)
2821 		return -ENOMEM;
2822 
2823 	state->context = dc_create_state(adev->dm.dc);
2824 	if (!state->context) {
2825 		kfree(state);
2826 		return -ENOMEM;
2827 	}
2828 
2829 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2830 
2831 	drm_atomic_private_obj_init(adev->ddev,
2832 				    &adev->dm.atomic_obj,
2833 				    &state->base,
2834 				    &dm_atomic_state_funcs);
2835 
2836 	r = amdgpu_display_modeset_create_props(adev);
2837 	if (r) {
2838 		dc_release_state(state->context);
2839 		kfree(state);
2840 		return r;
2841 	}
2842 
2843 	r = amdgpu_dm_audio_init(adev);
2844 	if (r) {
2845 		dc_release_state(state->context);
2846 		kfree(state);
2847 		return r;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2854 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2855 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2856 
2857 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2858 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2859 
2860 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2861 {
2862 #if defined(CONFIG_ACPI)
2863 	struct amdgpu_dm_backlight_caps caps;
2864 
2865 	memset(&caps, 0, sizeof(caps));
2866 
2867 	if (dm->backlight_caps.caps_valid)
2868 		return;
2869 
2870 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2871 	if (caps.caps_valid) {
2872 		dm->backlight_caps.caps_valid = true;
2873 		if (caps.aux_support)
2874 			return;
2875 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2876 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2877 	} else {
2878 		dm->backlight_caps.min_input_signal =
2879 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2880 		dm->backlight_caps.max_input_signal =
2881 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2882 	}
2883 #else
2884 	if (dm->backlight_caps.aux_support)
2885 		return;
2886 
2887 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2888 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2889 #endif
2890 }
2891 
2892 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2893 {
2894 	bool rc;
2895 
2896 	if (!link)
2897 		return 1;
2898 
2899 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2900 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2901 
2902 	return rc ? 0 : 1;
2903 }
2904 
2905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
2906 				unsigned *min, unsigned *max)
2907 {
2908 	if (!caps)
2909 		return 0;
2910 
2911 	if (caps->aux_support) {
2912 		// Firmware limits are in nits, DC API wants millinits.
2913 		*max = 1000 * caps->aux_max_input_signal;
2914 		*min = 1000 * caps->aux_min_input_signal;
2915 	} else {
2916 		// Firmware limits are 8-bit, PWM control is 16-bit.
2917 		*max = 0x101 * caps->max_input_signal;
2918 		*min = 0x101 * caps->min_input_signal;
2919 	}
2920 	return 1;
2921 }
2922 
2923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
2924 					uint32_t brightness)
2925 {
2926 	unsigned min, max;
2927 
2928 	if (!get_brightness_range(caps, &min, &max))
2929 		return brightness;
2930 
2931 	// Rescale 0..255 to min..max
2932 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
2933 				       AMDGPU_MAX_BL_LEVEL);
2934 }
2935 
2936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
2937 				      uint32_t brightness)
2938 {
2939 	unsigned min, max;
2940 
2941 	if (!get_brightness_range(caps, &min, &max))
2942 		return brightness;
2943 
2944 	if (brightness < min)
2945 		return 0;
2946 	// Rescale min..max to 0..255
2947 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
2948 				 max - min);
2949 }
2950 
2951 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2952 {
2953 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2954 	struct amdgpu_dm_backlight_caps caps;
2955 	struct dc_link *link = NULL;
2956 	u32 brightness;
2957 	bool rc;
2958 
2959 	amdgpu_dm_update_backlight_caps(dm);
2960 	caps = dm->backlight_caps;
2961 
2962 	link = (struct dc_link *)dm->backlight_link;
2963 
2964 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
2965 	// Change brightness based on AUX property
2966 	if (caps.aux_support)
2967 		return set_backlight_via_aux(link, brightness);
2968 
2969 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2970 
2971 	return rc ? 0 : 1;
2972 }
2973 
2974 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2975 {
2976 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2977 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2978 
2979 	if (ret == DC_ERROR_UNEXPECTED)
2980 		return bd->props.brightness;
2981 	return convert_brightness_to_user(&dm->backlight_caps, ret);
2982 }
2983 
2984 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2985 	.options = BL_CORE_SUSPENDRESUME,
2986 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2987 	.update_status	= amdgpu_dm_backlight_update_status,
2988 };
2989 
2990 static void
2991 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2992 {
2993 	char bl_name[16];
2994 	struct backlight_properties props = { 0 };
2995 
2996 	amdgpu_dm_update_backlight_caps(dm);
2997 
2998 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2999 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3000 	props.type = BACKLIGHT_RAW;
3001 
3002 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3003 			dm->adev->ddev->primary->index);
3004 
3005 	dm->backlight_dev = backlight_device_register(bl_name,
3006 			dm->adev->ddev->dev,
3007 			dm,
3008 			&amdgpu_dm_backlight_ops,
3009 			&props);
3010 
3011 	if (IS_ERR(dm->backlight_dev))
3012 		DRM_ERROR("DM: Backlight registration failed!\n");
3013 	else
3014 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3015 }
3016 
3017 #endif
3018 
3019 static int initialize_plane(struct amdgpu_display_manager *dm,
3020 			    struct amdgpu_mode_info *mode_info, int plane_id,
3021 			    enum drm_plane_type plane_type,
3022 			    const struct dc_plane_cap *plane_cap)
3023 {
3024 	struct drm_plane *plane;
3025 	unsigned long possible_crtcs;
3026 	int ret = 0;
3027 
3028 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3029 	if (!plane) {
3030 		DRM_ERROR("KMS: Failed to allocate plane\n");
3031 		return -ENOMEM;
3032 	}
3033 	plane->type = plane_type;
3034 
3035 	/*
3036 	 * HACK: IGT tests expect that the primary plane for a CRTC
3037 	 * can only have one possible CRTC. Only expose support for
3038 	 * any CRTC if they're not going to be used as a primary plane
3039 	 * for a CRTC - like overlay or underlay planes.
3040 	 */
3041 	possible_crtcs = 1 << plane_id;
3042 	if (plane_id >= dm->dc->caps.max_streams)
3043 		possible_crtcs = 0xff;
3044 
3045 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3046 
3047 	if (ret) {
3048 		DRM_ERROR("KMS: Failed to initialize plane\n");
3049 		kfree(plane);
3050 		return ret;
3051 	}
3052 
3053 	if (mode_info)
3054 		mode_info->planes[plane_id] = plane;
3055 
3056 	return ret;
3057 }
3058 
3059 
3060 static void register_backlight_device(struct amdgpu_display_manager *dm,
3061 				      struct dc_link *link)
3062 {
3063 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3064 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3065 
3066 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3067 	    link->type != dc_connection_none) {
3068 		/*
3069 		 * Event if registration failed, we should continue with
3070 		 * DM initialization because not having a backlight control
3071 		 * is better then a black screen.
3072 		 */
3073 		amdgpu_dm_register_backlight_device(dm);
3074 
3075 		if (dm->backlight_dev)
3076 			dm->backlight_link = link;
3077 	}
3078 #endif
3079 }
3080 
3081 
3082 /*
3083  * In this architecture, the association
3084  * connector -> encoder -> crtc
3085  * id not really requried. The crtc and connector will hold the
3086  * display_index as an abstraction to use with DAL component
3087  *
3088  * Returns 0 on success
3089  */
3090 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3091 {
3092 	struct amdgpu_display_manager *dm = &adev->dm;
3093 	int32_t i;
3094 	struct amdgpu_dm_connector *aconnector = NULL;
3095 	struct amdgpu_encoder *aencoder = NULL;
3096 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3097 	uint32_t link_cnt;
3098 	int32_t primary_planes;
3099 	enum dc_connection_type new_connection_type = dc_connection_none;
3100 	const struct dc_plane_cap *plane;
3101 
3102 	link_cnt = dm->dc->caps.max_links;
3103 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3104 		DRM_ERROR("DM: Failed to initialize mode config\n");
3105 		return -EINVAL;
3106 	}
3107 
3108 	/* There is one primary plane per CRTC */
3109 	primary_planes = dm->dc->caps.max_streams;
3110 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3111 
3112 	/*
3113 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3114 	 * Order is reversed to match iteration order in atomic check.
3115 	 */
3116 	for (i = (primary_planes - 1); i >= 0; i--) {
3117 		plane = &dm->dc->caps.planes[i];
3118 
3119 		if (initialize_plane(dm, mode_info, i,
3120 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3121 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3122 			goto fail;
3123 		}
3124 	}
3125 
3126 	/*
3127 	 * Initialize overlay planes, index starting after primary planes.
3128 	 * These planes have a higher DRM index than the primary planes since
3129 	 * they should be considered as having a higher z-order.
3130 	 * Order is reversed to match iteration order in atomic check.
3131 	 *
3132 	 * Only support DCN for now, and only expose one so we don't encourage
3133 	 * userspace to use up all the pipes.
3134 	 */
3135 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3136 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3137 
3138 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3139 			continue;
3140 
3141 		if (!plane->blends_with_above || !plane->blends_with_below)
3142 			continue;
3143 
3144 		if (!plane->pixel_format_support.argb8888)
3145 			continue;
3146 
3147 		if (initialize_plane(dm, NULL, primary_planes + i,
3148 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3149 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3150 			goto fail;
3151 		}
3152 
3153 		/* Only create one overlay plane. */
3154 		break;
3155 	}
3156 
3157 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3158 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3159 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3160 			goto fail;
3161 		}
3162 
3163 	dm->display_indexes_num = dm->dc->caps.max_streams;
3164 
3165 	/* loops over all connectors on the board */
3166 	for (i = 0; i < link_cnt; i++) {
3167 		struct dc_link *link = NULL;
3168 
3169 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3170 			DRM_ERROR(
3171 				"KMS: Cannot support more than %d display indexes\n",
3172 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3173 			continue;
3174 		}
3175 
3176 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3177 		if (!aconnector)
3178 			goto fail;
3179 
3180 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3181 		if (!aencoder)
3182 			goto fail;
3183 
3184 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3185 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3186 			goto fail;
3187 		}
3188 
3189 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3190 			DRM_ERROR("KMS: Failed to initialize connector\n");
3191 			goto fail;
3192 		}
3193 
3194 		link = dc_get_link_at_index(dm->dc, i);
3195 
3196 		if (!dc_link_detect_sink(link, &new_connection_type))
3197 			DRM_ERROR("KMS: Failed to detect connector\n");
3198 
3199 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3200 			emulated_link_detect(link);
3201 			amdgpu_dm_update_connector_after_detect(aconnector);
3202 
3203 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3204 			amdgpu_dm_update_connector_after_detect(aconnector);
3205 			register_backlight_device(dm, link);
3206 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3207 				amdgpu_dm_set_psr_caps(link);
3208 		}
3209 
3210 
3211 	}
3212 
3213 	/* Software is initialized. Now we can register interrupt handlers. */
3214 	switch (adev->asic_type) {
3215 	case CHIP_BONAIRE:
3216 	case CHIP_HAWAII:
3217 	case CHIP_KAVERI:
3218 	case CHIP_KABINI:
3219 	case CHIP_MULLINS:
3220 	case CHIP_TONGA:
3221 	case CHIP_FIJI:
3222 	case CHIP_CARRIZO:
3223 	case CHIP_STONEY:
3224 	case CHIP_POLARIS11:
3225 	case CHIP_POLARIS10:
3226 	case CHIP_POLARIS12:
3227 	case CHIP_VEGAM:
3228 	case CHIP_VEGA10:
3229 	case CHIP_VEGA12:
3230 	case CHIP_VEGA20:
3231 		if (dce110_register_irq_handlers(dm->adev)) {
3232 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3233 			goto fail;
3234 		}
3235 		break;
3236 #if defined(CONFIG_DRM_AMD_DC_DCN)
3237 	case CHIP_RAVEN:
3238 	case CHIP_NAVI12:
3239 	case CHIP_NAVI10:
3240 	case CHIP_NAVI14:
3241 	case CHIP_RENOIR:
3242 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3243 	case CHIP_SIENNA_CICHLID:
3244 	case CHIP_NAVY_FLOUNDER:
3245 #endif
3246 		if (dcn10_register_irq_handlers(dm->adev)) {
3247 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3248 			goto fail;
3249 		}
3250 		break;
3251 #endif
3252 	default:
3253 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3254 		goto fail;
3255 	}
3256 
3257 	/* No userspace support. */
3258 	dm->dc->debug.disable_tri_buf = true;
3259 
3260 	return 0;
3261 fail:
3262 	kfree(aencoder);
3263 	kfree(aconnector);
3264 
3265 	return -EINVAL;
3266 }
3267 
3268 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3269 {
3270 	drm_mode_config_cleanup(dm->ddev);
3271 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3272 	return;
3273 }
3274 
3275 /******************************************************************************
3276  * amdgpu_display_funcs functions
3277  *****************************************************************************/
3278 
3279 /*
3280  * dm_bandwidth_update - program display watermarks
3281  *
3282  * @adev: amdgpu_device pointer
3283  *
3284  * Calculate and program the display watermarks and line buffer allocation.
3285  */
3286 static void dm_bandwidth_update(struct amdgpu_device *adev)
3287 {
3288 	/* TODO: implement later */
3289 }
3290 
3291 static const struct amdgpu_display_funcs dm_display_funcs = {
3292 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3293 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3294 	.backlight_set_level = NULL, /* never called for DC */
3295 	.backlight_get_level = NULL, /* never called for DC */
3296 	.hpd_sense = NULL,/* called unconditionally */
3297 	.hpd_set_polarity = NULL, /* called unconditionally */
3298 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3299 	.page_flip_get_scanoutpos =
3300 		dm_crtc_get_scanoutpos,/* called unconditionally */
3301 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3302 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3303 };
3304 
3305 #if defined(CONFIG_DEBUG_KERNEL_DC)
3306 
3307 static ssize_t s3_debug_store(struct device *device,
3308 			      struct device_attribute *attr,
3309 			      const char *buf,
3310 			      size_t count)
3311 {
3312 	int ret;
3313 	int s3_state;
3314 	struct drm_device *drm_dev = dev_get_drvdata(device);
3315 	struct amdgpu_device *adev = drm_dev->dev_private;
3316 
3317 	ret = kstrtoint(buf, 0, &s3_state);
3318 
3319 	if (ret == 0) {
3320 		if (s3_state) {
3321 			dm_resume(adev);
3322 			drm_kms_helper_hotplug_event(adev->ddev);
3323 		} else
3324 			dm_suspend(adev);
3325 	}
3326 
3327 	return ret == 0 ? count : 0;
3328 }
3329 
3330 DEVICE_ATTR_WO(s3_debug);
3331 
3332 #endif
3333 
3334 static int dm_early_init(void *handle)
3335 {
3336 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3337 
3338 	switch (adev->asic_type) {
3339 	case CHIP_BONAIRE:
3340 	case CHIP_HAWAII:
3341 		adev->mode_info.num_crtc = 6;
3342 		adev->mode_info.num_hpd = 6;
3343 		adev->mode_info.num_dig = 6;
3344 		break;
3345 	case CHIP_KAVERI:
3346 		adev->mode_info.num_crtc = 4;
3347 		adev->mode_info.num_hpd = 6;
3348 		adev->mode_info.num_dig = 7;
3349 		break;
3350 	case CHIP_KABINI:
3351 	case CHIP_MULLINS:
3352 		adev->mode_info.num_crtc = 2;
3353 		adev->mode_info.num_hpd = 6;
3354 		adev->mode_info.num_dig = 6;
3355 		break;
3356 	case CHIP_FIJI:
3357 	case CHIP_TONGA:
3358 		adev->mode_info.num_crtc = 6;
3359 		adev->mode_info.num_hpd = 6;
3360 		adev->mode_info.num_dig = 7;
3361 		break;
3362 	case CHIP_CARRIZO:
3363 		adev->mode_info.num_crtc = 3;
3364 		adev->mode_info.num_hpd = 6;
3365 		adev->mode_info.num_dig = 9;
3366 		break;
3367 	case CHIP_STONEY:
3368 		adev->mode_info.num_crtc = 2;
3369 		adev->mode_info.num_hpd = 6;
3370 		adev->mode_info.num_dig = 9;
3371 		break;
3372 	case CHIP_POLARIS11:
3373 	case CHIP_POLARIS12:
3374 		adev->mode_info.num_crtc = 5;
3375 		adev->mode_info.num_hpd = 5;
3376 		adev->mode_info.num_dig = 5;
3377 		break;
3378 	case CHIP_POLARIS10:
3379 	case CHIP_VEGAM:
3380 		adev->mode_info.num_crtc = 6;
3381 		adev->mode_info.num_hpd = 6;
3382 		adev->mode_info.num_dig = 6;
3383 		break;
3384 	case CHIP_VEGA10:
3385 	case CHIP_VEGA12:
3386 	case CHIP_VEGA20:
3387 		adev->mode_info.num_crtc = 6;
3388 		adev->mode_info.num_hpd = 6;
3389 		adev->mode_info.num_dig = 6;
3390 		break;
3391 #if defined(CONFIG_DRM_AMD_DC_DCN)
3392 	case CHIP_RAVEN:
3393 		adev->mode_info.num_crtc = 4;
3394 		adev->mode_info.num_hpd = 4;
3395 		adev->mode_info.num_dig = 4;
3396 		break;
3397 #endif
3398 	case CHIP_NAVI10:
3399 	case CHIP_NAVI12:
3400 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3401 	case CHIP_SIENNA_CICHLID:
3402 	case CHIP_NAVY_FLOUNDER:
3403 #endif
3404 		adev->mode_info.num_crtc = 6;
3405 		adev->mode_info.num_hpd = 6;
3406 		adev->mode_info.num_dig = 6;
3407 		break;
3408 	case CHIP_NAVI14:
3409 		adev->mode_info.num_crtc = 5;
3410 		adev->mode_info.num_hpd = 5;
3411 		adev->mode_info.num_dig = 5;
3412 		break;
3413 	case CHIP_RENOIR:
3414 		adev->mode_info.num_crtc = 4;
3415 		adev->mode_info.num_hpd = 4;
3416 		adev->mode_info.num_dig = 4;
3417 		break;
3418 	default:
3419 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3420 		return -EINVAL;
3421 	}
3422 
3423 	amdgpu_dm_set_irq_funcs(adev);
3424 
3425 	if (adev->mode_info.funcs == NULL)
3426 		adev->mode_info.funcs = &dm_display_funcs;
3427 
3428 	/*
3429 	 * Note: Do NOT change adev->audio_endpt_rreg and
3430 	 * adev->audio_endpt_wreg because they are initialised in
3431 	 * amdgpu_device_init()
3432 	 */
3433 #if defined(CONFIG_DEBUG_KERNEL_DC)
3434 	device_create_file(
3435 		adev->ddev->dev,
3436 		&dev_attr_s3_debug);
3437 #endif
3438 
3439 	return 0;
3440 }
3441 
3442 static bool modeset_required(struct drm_crtc_state *crtc_state,
3443 			     struct dc_stream_state *new_stream,
3444 			     struct dc_stream_state *old_stream)
3445 {
3446 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3447 		return false;
3448 
3449 	if (!crtc_state->enable)
3450 		return false;
3451 
3452 	return crtc_state->active;
3453 }
3454 
3455 static bool modereset_required(struct drm_crtc_state *crtc_state)
3456 {
3457 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3458 		return false;
3459 
3460 	return !crtc_state->enable || !crtc_state->active;
3461 }
3462 
3463 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3464 {
3465 	drm_encoder_cleanup(encoder);
3466 	kfree(encoder);
3467 }
3468 
3469 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3470 	.destroy = amdgpu_dm_encoder_destroy,
3471 };
3472 
3473 
3474 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3475 				struct dc_scaling_info *scaling_info)
3476 {
3477 	int scale_w, scale_h;
3478 
3479 	memset(scaling_info, 0, sizeof(*scaling_info));
3480 
3481 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3482 	scaling_info->src_rect.x = state->src_x >> 16;
3483 	scaling_info->src_rect.y = state->src_y >> 16;
3484 
3485 	scaling_info->src_rect.width = state->src_w >> 16;
3486 	if (scaling_info->src_rect.width == 0)
3487 		return -EINVAL;
3488 
3489 	scaling_info->src_rect.height = state->src_h >> 16;
3490 	if (scaling_info->src_rect.height == 0)
3491 		return -EINVAL;
3492 
3493 	scaling_info->dst_rect.x = state->crtc_x;
3494 	scaling_info->dst_rect.y = state->crtc_y;
3495 
3496 	if (state->crtc_w == 0)
3497 		return -EINVAL;
3498 
3499 	scaling_info->dst_rect.width = state->crtc_w;
3500 
3501 	if (state->crtc_h == 0)
3502 		return -EINVAL;
3503 
3504 	scaling_info->dst_rect.height = state->crtc_h;
3505 
3506 	/* DRM doesn't specify clipping on destination output. */
3507 	scaling_info->clip_rect = scaling_info->dst_rect;
3508 
3509 	/* TODO: Validate scaling per-format with DC plane caps */
3510 	scale_w = scaling_info->dst_rect.width * 1000 /
3511 		  scaling_info->src_rect.width;
3512 
3513 	if (scale_w < 250 || scale_w > 16000)
3514 		return -EINVAL;
3515 
3516 	scale_h = scaling_info->dst_rect.height * 1000 /
3517 		  scaling_info->src_rect.height;
3518 
3519 	if (scale_h < 250 || scale_h > 16000)
3520 		return -EINVAL;
3521 
3522 	/*
3523 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3524 	 * assume reasonable defaults based on the format.
3525 	 */
3526 
3527 	return 0;
3528 }
3529 
3530 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3531 		       uint64_t *tiling_flags, bool *tmz_surface)
3532 {
3533 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3534 	int r = amdgpu_bo_reserve(rbo, false);
3535 
3536 	if (unlikely(r)) {
3537 		/* Don't show error message when returning -ERESTARTSYS */
3538 		if (r != -ERESTARTSYS)
3539 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3540 		return r;
3541 	}
3542 
3543 	if (tiling_flags)
3544 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3545 
3546 	if (tmz_surface)
3547 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3548 
3549 	amdgpu_bo_unreserve(rbo);
3550 
3551 	return r;
3552 }
3553 
3554 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3555 {
3556 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3557 
3558 	return offset ? (address + offset * 256) : 0;
3559 }
3560 
3561 static int
3562 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3563 			  const struct amdgpu_framebuffer *afb,
3564 			  const enum surface_pixel_format format,
3565 			  const enum dc_rotation_angle rotation,
3566 			  const struct plane_size *plane_size,
3567 			  const union dc_tiling_info *tiling_info,
3568 			  const uint64_t info,
3569 			  struct dc_plane_dcc_param *dcc,
3570 			  struct dc_plane_address *address,
3571 			  bool force_disable_dcc)
3572 {
3573 	struct dc *dc = adev->dm.dc;
3574 	struct dc_dcc_surface_param input;
3575 	struct dc_surface_dcc_cap output;
3576 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3577 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3578 	uint64_t dcc_address;
3579 
3580 	memset(&input, 0, sizeof(input));
3581 	memset(&output, 0, sizeof(output));
3582 
3583 	if (force_disable_dcc)
3584 		return 0;
3585 
3586 	if (!offset)
3587 		return 0;
3588 
3589 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3590 		return 0;
3591 
3592 	if (!dc->cap_funcs.get_dcc_compression_cap)
3593 		return -EINVAL;
3594 
3595 	input.format = format;
3596 	input.surface_size.width = plane_size->surface_size.width;
3597 	input.surface_size.height = plane_size->surface_size.height;
3598 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3599 
3600 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3601 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3602 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3603 		input.scan = SCAN_DIRECTION_VERTICAL;
3604 
3605 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3606 		return -EINVAL;
3607 
3608 	if (!output.capable)
3609 		return -EINVAL;
3610 
3611 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3612 		return -EINVAL;
3613 
3614 	dcc->enable = 1;
3615 	dcc->meta_pitch =
3616 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3617 	dcc->independent_64b_blks = i64b;
3618 
3619 	dcc_address = get_dcc_address(afb->address, info);
3620 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3621 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3622 
3623 	return 0;
3624 }
3625 
3626 static int
3627 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3628 			     const struct amdgpu_framebuffer *afb,
3629 			     const enum surface_pixel_format format,
3630 			     const enum dc_rotation_angle rotation,
3631 			     const uint64_t tiling_flags,
3632 			     union dc_tiling_info *tiling_info,
3633 			     struct plane_size *plane_size,
3634 			     struct dc_plane_dcc_param *dcc,
3635 			     struct dc_plane_address *address,
3636 			     bool tmz_surface,
3637 			     bool force_disable_dcc)
3638 {
3639 	const struct drm_framebuffer *fb = &afb->base;
3640 	int ret;
3641 
3642 	memset(tiling_info, 0, sizeof(*tiling_info));
3643 	memset(plane_size, 0, sizeof(*plane_size));
3644 	memset(dcc, 0, sizeof(*dcc));
3645 	memset(address, 0, sizeof(*address));
3646 
3647 	address->tmz_surface = tmz_surface;
3648 
3649 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3650 		plane_size->surface_size.x = 0;
3651 		plane_size->surface_size.y = 0;
3652 		plane_size->surface_size.width = fb->width;
3653 		plane_size->surface_size.height = fb->height;
3654 		plane_size->surface_pitch =
3655 			fb->pitches[0] / fb->format->cpp[0];
3656 
3657 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3658 		address->grph.addr.low_part = lower_32_bits(afb->address);
3659 		address->grph.addr.high_part = upper_32_bits(afb->address);
3660 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3661 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3662 
3663 		plane_size->surface_size.x = 0;
3664 		plane_size->surface_size.y = 0;
3665 		plane_size->surface_size.width = fb->width;
3666 		plane_size->surface_size.height = fb->height;
3667 		plane_size->surface_pitch =
3668 			fb->pitches[0] / fb->format->cpp[0];
3669 
3670 		plane_size->chroma_size.x = 0;
3671 		plane_size->chroma_size.y = 0;
3672 		/* TODO: set these based on surface format */
3673 		plane_size->chroma_size.width = fb->width / 2;
3674 		plane_size->chroma_size.height = fb->height / 2;
3675 
3676 		plane_size->chroma_pitch =
3677 			fb->pitches[1] / fb->format->cpp[1];
3678 
3679 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3680 		address->video_progressive.luma_addr.low_part =
3681 			lower_32_bits(afb->address);
3682 		address->video_progressive.luma_addr.high_part =
3683 			upper_32_bits(afb->address);
3684 		address->video_progressive.chroma_addr.low_part =
3685 			lower_32_bits(chroma_addr);
3686 		address->video_progressive.chroma_addr.high_part =
3687 			upper_32_bits(chroma_addr);
3688 	}
3689 
3690 	/* Fill GFX8 params */
3691 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3692 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3693 
3694 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3695 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3696 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3697 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3698 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3699 
3700 		/* XXX fix me for VI */
3701 		tiling_info->gfx8.num_banks = num_banks;
3702 		tiling_info->gfx8.array_mode =
3703 				DC_ARRAY_2D_TILED_THIN1;
3704 		tiling_info->gfx8.tile_split = tile_split;
3705 		tiling_info->gfx8.bank_width = bankw;
3706 		tiling_info->gfx8.bank_height = bankh;
3707 		tiling_info->gfx8.tile_aspect = mtaspect;
3708 		tiling_info->gfx8.tile_mode =
3709 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3710 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3711 			== DC_ARRAY_1D_TILED_THIN1) {
3712 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3713 	}
3714 
3715 	tiling_info->gfx8.pipe_config =
3716 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3717 
3718 	if (adev->asic_type == CHIP_VEGA10 ||
3719 	    adev->asic_type == CHIP_VEGA12 ||
3720 	    adev->asic_type == CHIP_VEGA20 ||
3721 	    adev->asic_type == CHIP_NAVI10 ||
3722 	    adev->asic_type == CHIP_NAVI14 ||
3723 	    adev->asic_type == CHIP_NAVI12 ||
3724 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3725 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3726 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3727 #endif
3728 	    adev->asic_type == CHIP_RENOIR ||
3729 	    adev->asic_type == CHIP_RAVEN) {
3730 		/* Fill GFX9 params */
3731 		tiling_info->gfx9.num_pipes =
3732 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3733 		tiling_info->gfx9.num_banks =
3734 			adev->gfx.config.gb_addr_config_fields.num_banks;
3735 		tiling_info->gfx9.pipe_interleave =
3736 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3737 		tiling_info->gfx9.num_shader_engines =
3738 			adev->gfx.config.gb_addr_config_fields.num_se;
3739 		tiling_info->gfx9.max_compressed_frags =
3740 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3741 		tiling_info->gfx9.num_rb_per_se =
3742 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3743 		tiling_info->gfx9.swizzle =
3744 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3745 		tiling_info->gfx9.shaderEnable = 1;
3746 
3747 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3748 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3749 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3750 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3751 #endif
3752 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3753 						plane_size, tiling_info,
3754 						tiling_flags, dcc, address,
3755 						force_disable_dcc);
3756 		if (ret)
3757 			return ret;
3758 	}
3759 
3760 	return 0;
3761 }
3762 
3763 static void
3764 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3765 			       bool *per_pixel_alpha, bool *global_alpha,
3766 			       int *global_alpha_value)
3767 {
3768 	*per_pixel_alpha = false;
3769 	*global_alpha = false;
3770 	*global_alpha_value = 0xff;
3771 
3772 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3773 		return;
3774 
3775 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3776 		static const uint32_t alpha_formats[] = {
3777 			DRM_FORMAT_ARGB8888,
3778 			DRM_FORMAT_RGBA8888,
3779 			DRM_FORMAT_ABGR8888,
3780 		};
3781 		uint32_t format = plane_state->fb->format->format;
3782 		unsigned int i;
3783 
3784 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3785 			if (format == alpha_formats[i]) {
3786 				*per_pixel_alpha = true;
3787 				break;
3788 			}
3789 		}
3790 	}
3791 
3792 	if (plane_state->alpha < 0xffff) {
3793 		*global_alpha = true;
3794 		*global_alpha_value = plane_state->alpha >> 8;
3795 	}
3796 }
3797 
3798 static int
3799 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3800 			    const enum surface_pixel_format format,
3801 			    enum dc_color_space *color_space)
3802 {
3803 	bool full_range;
3804 
3805 	*color_space = COLOR_SPACE_SRGB;
3806 
3807 	/* DRM color properties only affect non-RGB formats. */
3808 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3809 		return 0;
3810 
3811 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3812 
3813 	switch (plane_state->color_encoding) {
3814 	case DRM_COLOR_YCBCR_BT601:
3815 		if (full_range)
3816 			*color_space = COLOR_SPACE_YCBCR601;
3817 		else
3818 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3819 		break;
3820 
3821 	case DRM_COLOR_YCBCR_BT709:
3822 		if (full_range)
3823 			*color_space = COLOR_SPACE_YCBCR709;
3824 		else
3825 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3826 		break;
3827 
3828 	case DRM_COLOR_YCBCR_BT2020:
3829 		if (full_range)
3830 			*color_space = COLOR_SPACE_2020_YCBCR;
3831 		else
3832 			return -EINVAL;
3833 		break;
3834 
3835 	default:
3836 		return -EINVAL;
3837 	}
3838 
3839 	return 0;
3840 }
3841 
3842 static int
3843 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3844 			    const struct drm_plane_state *plane_state,
3845 			    const uint64_t tiling_flags,
3846 			    struct dc_plane_info *plane_info,
3847 			    struct dc_plane_address *address,
3848 			    bool tmz_surface,
3849 			    bool force_disable_dcc)
3850 {
3851 	const struct drm_framebuffer *fb = plane_state->fb;
3852 	const struct amdgpu_framebuffer *afb =
3853 		to_amdgpu_framebuffer(plane_state->fb);
3854 	struct drm_format_name_buf format_name;
3855 	int ret;
3856 
3857 	memset(plane_info, 0, sizeof(*plane_info));
3858 
3859 	switch (fb->format->format) {
3860 	case DRM_FORMAT_C8:
3861 		plane_info->format =
3862 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3863 		break;
3864 	case DRM_FORMAT_RGB565:
3865 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3866 		break;
3867 	case DRM_FORMAT_XRGB8888:
3868 	case DRM_FORMAT_ARGB8888:
3869 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3870 		break;
3871 	case DRM_FORMAT_XRGB2101010:
3872 	case DRM_FORMAT_ARGB2101010:
3873 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3874 		break;
3875 	case DRM_FORMAT_XBGR2101010:
3876 	case DRM_FORMAT_ABGR2101010:
3877 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3878 		break;
3879 	case DRM_FORMAT_XBGR8888:
3880 	case DRM_FORMAT_ABGR8888:
3881 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3882 		break;
3883 	case DRM_FORMAT_NV21:
3884 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3885 		break;
3886 	case DRM_FORMAT_NV12:
3887 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3888 		break;
3889 	case DRM_FORMAT_P010:
3890 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3891 		break;
3892 	case DRM_FORMAT_XRGB16161616F:
3893 	case DRM_FORMAT_ARGB16161616F:
3894 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3895 		break;
3896 	case DRM_FORMAT_XBGR16161616F:
3897 	case DRM_FORMAT_ABGR16161616F:
3898 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3899 		break;
3900 	default:
3901 		DRM_ERROR(
3902 			"Unsupported screen format %s\n",
3903 			drm_get_format_name(fb->format->format, &format_name));
3904 		return -EINVAL;
3905 	}
3906 
3907 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3908 	case DRM_MODE_ROTATE_0:
3909 		plane_info->rotation = ROTATION_ANGLE_0;
3910 		break;
3911 	case DRM_MODE_ROTATE_90:
3912 		plane_info->rotation = ROTATION_ANGLE_90;
3913 		break;
3914 	case DRM_MODE_ROTATE_180:
3915 		plane_info->rotation = ROTATION_ANGLE_180;
3916 		break;
3917 	case DRM_MODE_ROTATE_270:
3918 		plane_info->rotation = ROTATION_ANGLE_270;
3919 		break;
3920 	default:
3921 		plane_info->rotation = ROTATION_ANGLE_0;
3922 		break;
3923 	}
3924 
3925 	plane_info->visible = true;
3926 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3927 
3928 	plane_info->layer_index = 0;
3929 
3930 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3931 					  &plane_info->color_space);
3932 	if (ret)
3933 		return ret;
3934 
3935 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3936 					   plane_info->rotation, tiling_flags,
3937 					   &plane_info->tiling_info,
3938 					   &plane_info->plane_size,
3939 					   &plane_info->dcc, address, tmz_surface,
3940 					   force_disable_dcc);
3941 	if (ret)
3942 		return ret;
3943 
3944 	fill_blending_from_plane_state(
3945 		plane_state, &plane_info->per_pixel_alpha,
3946 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3947 
3948 	return 0;
3949 }
3950 
3951 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3952 				    struct dc_plane_state *dc_plane_state,
3953 				    struct drm_plane_state *plane_state,
3954 				    struct drm_crtc_state *crtc_state)
3955 {
3956 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3957 	const struct amdgpu_framebuffer *amdgpu_fb =
3958 		to_amdgpu_framebuffer(plane_state->fb);
3959 	struct dc_scaling_info scaling_info;
3960 	struct dc_plane_info plane_info;
3961 	uint64_t tiling_flags;
3962 	int ret;
3963 	bool tmz_surface = false;
3964 	bool force_disable_dcc = false;
3965 
3966 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3967 	if (ret)
3968 		return ret;
3969 
3970 	dc_plane_state->src_rect = scaling_info.src_rect;
3971 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3972 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3973 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3974 
3975 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3976 	if (ret)
3977 		return ret;
3978 
3979 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3980 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3981 					  &plane_info,
3982 					  &dc_plane_state->address,
3983 					  tmz_surface,
3984 					  force_disable_dcc);
3985 	if (ret)
3986 		return ret;
3987 
3988 	dc_plane_state->format = plane_info.format;
3989 	dc_plane_state->color_space = plane_info.color_space;
3990 	dc_plane_state->format = plane_info.format;
3991 	dc_plane_state->plane_size = plane_info.plane_size;
3992 	dc_plane_state->rotation = plane_info.rotation;
3993 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3994 	dc_plane_state->stereo_format = plane_info.stereo_format;
3995 	dc_plane_state->tiling_info = plane_info.tiling_info;
3996 	dc_plane_state->visible = plane_info.visible;
3997 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3998 	dc_plane_state->global_alpha = plane_info.global_alpha;
3999 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4000 	dc_plane_state->dcc = plane_info.dcc;
4001 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4002 
4003 	/*
4004 	 * Always set input transfer function, since plane state is refreshed
4005 	 * every time.
4006 	 */
4007 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4008 	if (ret)
4009 		return ret;
4010 
4011 	return 0;
4012 }
4013 
4014 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4015 					   const struct dm_connector_state *dm_state,
4016 					   struct dc_stream_state *stream)
4017 {
4018 	enum amdgpu_rmx_type rmx_type;
4019 
4020 	struct rect src = { 0 }; /* viewport in composition space*/
4021 	struct rect dst = { 0 }; /* stream addressable area */
4022 
4023 	/* no mode. nothing to be done */
4024 	if (!mode)
4025 		return;
4026 
4027 	/* Full screen scaling by default */
4028 	src.width = mode->hdisplay;
4029 	src.height = mode->vdisplay;
4030 	dst.width = stream->timing.h_addressable;
4031 	dst.height = stream->timing.v_addressable;
4032 
4033 	if (dm_state) {
4034 		rmx_type = dm_state->scaling;
4035 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4036 			if (src.width * dst.height <
4037 					src.height * dst.width) {
4038 				/* height needs less upscaling/more downscaling */
4039 				dst.width = src.width *
4040 						dst.height / src.height;
4041 			} else {
4042 				/* width needs less upscaling/more downscaling */
4043 				dst.height = src.height *
4044 						dst.width / src.width;
4045 			}
4046 		} else if (rmx_type == RMX_CENTER) {
4047 			dst = src;
4048 		}
4049 
4050 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4051 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4052 
4053 		if (dm_state->underscan_enable) {
4054 			dst.x += dm_state->underscan_hborder / 2;
4055 			dst.y += dm_state->underscan_vborder / 2;
4056 			dst.width -= dm_state->underscan_hborder;
4057 			dst.height -= dm_state->underscan_vborder;
4058 		}
4059 	}
4060 
4061 	stream->src = src;
4062 	stream->dst = dst;
4063 
4064 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4065 			dst.x, dst.y, dst.width, dst.height);
4066 
4067 }
4068 
4069 static enum dc_color_depth
4070 convert_color_depth_from_display_info(const struct drm_connector *connector,
4071 				      bool is_y420, int requested_bpc)
4072 {
4073 	uint8_t bpc;
4074 
4075 	if (is_y420) {
4076 		bpc = 8;
4077 
4078 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4079 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4080 			bpc = 16;
4081 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4082 			bpc = 12;
4083 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4084 			bpc = 10;
4085 	} else {
4086 		bpc = (uint8_t)connector->display_info.bpc;
4087 		/* Assume 8 bpc by default if no bpc is specified. */
4088 		bpc = bpc ? bpc : 8;
4089 	}
4090 
4091 	if (requested_bpc > 0) {
4092 		/*
4093 		 * Cap display bpc based on the user requested value.
4094 		 *
4095 		 * The value for state->max_bpc may not correctly updated
4096 		 * depending on when the connector gets added to the state
4097 		 * or if this was called outside of atomic check, so it
4098 		 * can't be used directly.
4099 		 */
4100 		bpc = min_t(u8, bpc, requested_bpc);
4101 
4102 		/* Round down to the nearest even number. */
4103 		bpc = bpc - (bpc & 1);
4104 	}
4105 
4106 	switch (bpc) {
4107 	case 0:
4108 		/*
4109 		 * Temporary Work around, DRM doesn't parse color depth for
4110 		 * EDID revision before 1.4
4111 		 * TODO: Fix edid parsing
4112 		 */
4113 		return COLOR_DEPTH_888;
4114 	case 6:
4115 		return COLOR_DEPTH_666;
4116 	case 8:
4117 		return COLOR_DEPTH_888;
4118 	case 10:
4119 		return COLOR_DEPTH_101010;
4120 	case 12:
4121 		return COLOR_DEPTH_121212;
4122 	case 14:
4123 		return COLOR_DEPTH_141414;
4124 	case 16:
4125 		return COLOR_DEPTH_161616;
4126 	default:
4127 		return COLOR_DEPTH_UNDEFINED;
4128 	}
4129 }
4130 
4131 static enum dc_aspect_ratio
4132 get_aspect_ratio(const struct drm_display_mode *mode_in)
4133 {
4134 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4135 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4136 }
4137 
4138 static enum dc_color_space
4139 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4140 {
4141 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4142 
4143 	switch (dc_crtc_timing->pixel_encoding)	{
4144 	case PIXEL_ENCODING_YCBCR422:
4145 	case PIXEL_ENCODING_YCBCR444:
4146 	case PIXEL_ENCODING_YCBCR420:
4147 	{
4148 		/*
4149 		 * 27030khz is the separation point between HDTV and SDTV
4150 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4151 		 * respectively
4152 		 */
4153 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4154 			if (dc_crtc_timing->flags.Y_ONLY)
4155 				color_space =
4156 					COLOR_SPACE_YCBCR709_LIMITED;
4157 			else
4158 				color_space = COLOR_SPACE_YCBCR709;
4159 		} else {
4160 			if (dc_crtc_timing->flags.Y_ONLY)
4161 				color_space =
4162 					COLOR_SPACE_YCBCR601_LIMITED;
4163 			else
4164 				color_space = COLOR_SPACE_YCBCR601;
4165 		}
4166 
4167 	}
4168 	break;
4169 	case PIXEL_ENCODING_RGB:
4170 		color_space = COLOR_SPACE_SRGB;
4171 		break;
4172 
4173 	default:
4174 		WARN_ON(1);
4175 		break;
4176 	}
4177 
4178 	return color_space;
4179 }
4180 
4181 static bool adjust_colour_depth_from_display_info(
4182 	struct dc_crtc_timing *timing_out,
4183 	const struct drm_display_info *info)
4184 {
4185 	enum dc_color_depth depth = timing_out->display_color_depth;
4186 	int normalized_clk;
4187 	do {
4188 		normalized_clk = timing_out->pix_clk_100hz / 10;
4189 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4190 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4191 			normalized_clk /= 2;
4192 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4193 		switch (depth) {
4194 		case COLOR_DEPTH_888:
4195 			break;
4196 		case COLOR_DEPTH_101010:
4197 			normalized_clk = (normalized_clk * 30) / 24;
4198 			break;
4199 		case COLOR_DEPTH_121212:
4200 			normalized_clk = (normalized_clk * 36) / 24;
4201 			break;
4202 		case COLOR_DEPTH_161616:
4203 			normalized_clk = (normalized_clk * 48) / 24;
4204 			break;
4205 		default:
4206 			/* The above depths are the only ones valid for HDMI. */
4207 			return false;
4208 		}
4209 		if (normalized_clk <= info->max_tmds_clock) {
4210 			timing_out->display_color_depth = depth;
4211 			return true;
4212 		}
4213 	} while (--depth > COLOR_DEPTH_666);
4214 	return false;
4215 }
4216 
4217 static void fill_stream_properties_from_drm_display_mode(
4218 	struct dc_stream_state *stream,
4219 	const struct drm_display_mode *mode_in,
4220 	const struct drm_connector *connector,
4221 	const struct drm_connector_state *connector_state,
4222 	const struct dc_stream_state *old_stream,
4223 	int requested_bpc)
4224 {
4225 	struct dc_crtc_timing *timing_out = &stream->timing;
4226 	const struct drm_display_info *info = &connector->display_info;
4227 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4228 	struct hdmi_vendor_infoframe hv_frame;
4229 	struct hdmi_avi_infoframe avi_frame;
4230 
4231 	memset(&hv_frame, 0, sizeof(hv_frame));
4232 	memset(&avi_frame, 0, sizeof(avi_frame));
4233 
4234 	timing_out->h_border_left = 0;
4235 	timing_out->h_border_right = 0;
4236 	timing_out->v_border_top = 0;
4237 	timing_out->v_border_bottom = 0;
4238 	/* TODO: un-hardcode */
4239 	if (drm_mode_is_420_only(info, mode_in)
4240 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4241 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4242 	else if (drm_mode_is_420_also(info, mode_in)
4243 			&& aconnector->force_yuv420_output)
4244 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4245 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4246 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4247 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4248 	else
4249 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4250 
4251 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4252 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4253 		connector,
4254 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4255 		requested_bpc);
4256 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4257 	timing_out->hdmi_vic = 0;
4258 
4259 	if(old_stream) {
4260 		timing_out->vic = old_stream->timing.vic;
4261 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4262 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4263 	} else {
4264 		timing_out->vic = drm_match_cea_mode(mode_in);
4265 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4266 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4267 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4268 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4269 	}
4270 
4271 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4272 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4273 		timing_out->vic = avi_frame.video_code;
4274 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4275 		timing_out->hdmi_vic = hv_frame.vic;
4276 	}
4277 
4278 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4279 	timing_out->h_total = mode_in->crtc_htotal;
4280 	timing_out->h_sync_width =
4281 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4282 	timing_out->h_front_porch =
4283 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4284 	timing_out->v_total = mode_in->crtc_vtotal;
4285 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4286 	timing_out->v_front_porch =
4287 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4288 	timing_out->v_sync_width =
4289 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4290 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4291 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4292 
4293 	stream->output_color_space = get_output_color_space(timing_out);
4294 
4295 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4296 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4297 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4298 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4299 		    drm_mode_is_420_also(info, mode_in) &&
4300 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4301 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4302 			adjust_colour_depth_from_display_info(timing_out, info);
4303 		}
4304 	}
4305 }
4306 
4307 static void fill_audio_info(struct audio_info *audio_info,
4308 			    const struct drm_connector *drm_connector,
4309 			    const struct dc_sink *dc_sink)
4310 {
4311 	int i = 0;
4312 	int cea_revision = 0;
4313 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4314 
4315 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4316 	audio_info->product_id = edid_caps->product_id;
4317 
4318 	cea_revision = drm_connector->display_info.cea_rev;
4319 
4320 	strscpy(audio_info->display_name,
4321 		edid_caps->display_name,
4322 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4323 
4324 	if (cea_revision >= 3) {
4325 		audio_info->mode_count = edid_caps->audio_mode_count;
4326 
4327 		for (i = 0; i < audio_info->mode_count; ++i) {
4328 			audio_info->modes[i].format_code =
4329 					(enum audio_format_code)
4330 					(edid_caps->audio_modes[i].format_code);
4331 			audio_info->modes[i].channel_count =
4332 					edid_caps->audio_modes[i].channel_count;
4333 			audio_info->modes[i].sample_rates.all =
4334 					edid_caps->audio_modes[i].sample_rate;
4335 			audio_info->modes[i].sample_size =
4336 					edid_caps->audio_modes[i].sample_size;
4337 		}
4338 	}
4339 
4340 	audio_info->flags.all = edid_caps->speaker_flags;
4341 
4342 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4343 	if (drm_connector->latency_present[0]) {
4344 		audio_info->video_latency = drm_connector->video_latency[0];
4345 		audio_info->audio_latency = drm_connector->audio_latency[0];
4346 	}
4347 
4348 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4349 
4350 }
4351 
4352 static void
4353 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4354 				      struct drm_display_mode *dst_mode)
4355 {
4356 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4357 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4358 	dst_mode->crtc_clock = src_mode->crtc_clock;
4359 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4360 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4361 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4362 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4363 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4364 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4365 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4366 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4367 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4368 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4369 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4370 }
4371 
4372 static void
4373 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4374 					const struct drm_display_mode *native_mode,
4375 					bool scale_enabled)
4376 {
4377 	if (scale_enabled) {
4378 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4379 	} else if (native_mode->clock == drm_mode->clock &&
4380 			native_mode->htotal == drm_mode->htotal &&
4381 			native_mode->vtotal == drm_mode->vtotal) {
4382 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4383 	} else {
4384 		/* no scaling nor amdgpu inserted, no need to patch */
4385 	}
4386 }
4387 
4388 static struct dc_sink *
4389 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4390 {
4391 	struct dc_sink_init_data sink_init_data = { 0 };
4392 	struct dc_sink *sink = NULL;
4393 	sink_init_data.link = aconnector->dc_link;
4394 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4395 
4396 	sink = dc_sink_create(&sink_init_data);
4397 	if (!sink) {
4398 		DRM_ERROR("Failed to create sink!\n");
4399 		return NULL;
4400 	}
4401 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4402 
4403 	return sink;
4404 }
4405 
4406 static void set_multisync_trigger_params(
4407 		struct dc_stream_state *stream)
4408 {
4409 	if (stream->triggered_crtc_reset.enabled) {
4410 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4411 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4412 	}
4413 }
4414 
4415 static void set_master_stream(struct dc_stream_state *stream_set[],
4416 			      int stream_count)
4417 {
4418 	int j, highest_rfr = 0, master_stream = 0;
4419 
4420 	for (j = 0;  j < stream_count; j++) {
4421 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4422 			int refresh_rate = 0;
4423 
4424 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4425 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4426 			if (refresh_rate > highest_rfr) {
4427 				highest_rfr = refresh_rate;
4428 				master_stream = j;
4429 			}
4430 		}
4431 	}
4432 	for (j = 0;  j < stream_count; j++) {
4433 		if (stream_set[j])
4434 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4435 	}
4436 }
4437 
4438 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4439 {
4440 	int i = 0;
4441 
4442 	if (context->stream_count < 2)
4443 		return;
4444 	for (i = 0; i < context->stream_count ; i++) {
4445 		if (!context->streams[i])
4446 			continue;
4447 		/*
4448 		 * TODO: add a function to read AMD VSDB bits and set
4449 		 * crtc_sync_master.multi_sync_enabled flag
4450 		 * For now it's set to false
4451 		 */
4452 		set_multisync_trigger_params(context->streams[i]);
4453 	}
4454 	set_master_stream(context->streams, context->stream_count);
4455 }
4456 
4457 static struct dc_stream_state *
4458 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4459 		       const struct drm_display_mode *drm_mode,
4460 		       const struct dm_connector_state *dm_state,
4461 		       const struct dc_stream_state *old_stream,
4462 		       int requested_bpc)
4463 {
4464 	struct drm_display_mode *preferred_mode = NULL;
4465 	struct drm_connector *drm_connector;
4466 	const struct drm_connector_state *con_state =
4467 		dm_state ? &dm_state->base : NULL;
4468 	struct dc_stream_state *stream = NULL;
4469 	struct drm_display_mode mode = *drm_mode;
4470 	bool native_mode_found = false;
4471 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4472 	int mode_refresh;
4473 	int preferred_refresh = 0;
4474 #if defined(CONFIG_DRM_AMD_DC_DCN)
4475 	struct dsc_dec_dpcd_caps dsc_caps;
4476 #endif
4477 	uint32_t link_bandwidth_kbps;
4478 
4479 	struct dc_sink *sink = NULL;
4480 	if (aconnector == NULL) {
4481 		DRM_ERROR("aconnector is NULL!\n");
4482 		return stream;
4483 	}
4484 
4485 	drm_connector = &aconnector->base;
4486 
4487 	if (!aconnector->dc_sink) {
4488 		sink = create_fake_sink(aconnector);
4489 		if (!sink)
4490 			return stream;
4491 	} else {
4492 		sink = aconnector->dc_sink;
4493 		dc_sink_retain(sink);
4494 	}
4495 
4496 	stream = dc_create_stream_for_sink(sink);
4497 
4498 	if (stream == NULL) {
4499 		DRM_ERROR("Failed to create stream for sink!\n");
4500 		goto finish;
4501 	}
4502 
4503 	stream->dm_stream_context = aconnector;
4504 
4505 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4506 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4507 
4508 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4509 		/* Search for preferred mode */
4510 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4511 			native_mode_found = true;
4512 			break;
4513 		}
4514 	}
4515 	if (!native_mode_found)
4516 		preferred_mode = list_first_entry_or_null(
4517 				&aconnector->base.modes,
4518 				struct drm_display_mode,
4519 				head);
4520 
4521 	mode_refresh = drm_mode_vrefresh(&mode);
4522 
4523 	if (preferred_mode == NULL) {
4524 		/*
4525 		 * This may not be an error, the use case is when we have no
4526 		 * usermode calls to reset and set mode upon hotplug. In this
4527 		 * case, we call set mode ourselves to restore the previous mode
4528 		 * and the modelist may not be filled in in time.
4529 		 */
4530 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4531 	} else {
4532 		decide_crtc_timing_for_drm_display_mode(
4533 				&mode, preferred_mode,
4534 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4535 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4536 	}
4537 
4538 	if (!dm_state)
4539 		drm_mode_set_crtcinfo(&mode, 0);
4540 
4541 	/*
4542 	* If scaling is enabled and refresh rate didn't change
4543 	* we copy the vic and polarities of the old timings
4544 	*/
4545 	if (!scale || mode_refresh != preferred_refresh)
4546 		fill_stream_properties_from_drm_display_mode(stream,
4547 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4548 	else
4549 		fill_stream_properties_from_drm_display_mode(stream,
4550 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4551 
4552 	stream->timing.flags.DSC = 0;
4553 
4554 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4555 #if defined(CONFIG_DRM_AMD_DC_DCN)
4556 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4557 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4558 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4559 				      &dsc_caps);
4560 #endif
4561 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4562 							     dc_link_get_link_cap(aconnector->dc_link));
4563 
4564 #if defined(CONFIG_DRM_AMD_DC_DCN)
4565 		if (dsc_caps.is_dsc_supported)
4566 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4567 						  &dsc_caps,
4568 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4569 						  link_bandwidth_kbps,
4570 						  &stream->timing,
4571 						  &stream->timing.dsc_cfg))
4572 				stream->timing.flags.DSC = 1;
4573 #endif
4574 	}
4575 
4576 	update_stream_scaling_settings(&mode, dm_state, stream);
4577 
4578 	fill_audio_info(
4579 		&stream->audio_info,
4580 		drm_connector,
4581 		sink);
4582 
4583 	update_stream_signal(stream, sink);
4584 
4585 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4586 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4587 	if (stream->link->psr_settings.psr_feature_enabled) {
4588 		//
4589 		// should decide stream support vsc sdp colorimetry capability
4590 		// before building vsc info packet
4591 		//
4592 		stream->use_vsc_sdp_for_colorimetry = false;
4593 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4594 			stream->use_vsc_sdp_for_colorimetry =
4595 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4596 		} else {
4597 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4598 				stream->use_vsc_sdp_for_colorimetry = true;
4599 		}
4600 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4601 	}
4602 finish:
4603 	dc_sink_release(sink);
4604 
4605 	return stream;
4606 }
4607 
4608 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4609 {
4610 	drm_crtc_cleanup(crtc);
4611 	kfree(crtc);
4612 }
4613 
4614 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4615 				  struct drm_crtc_state *state)
4616 {
4617 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4618 
4619 	/* TODO Destroy dc_stream objects are stream object is flattened */
4620 	if (cur->stream)
4621 		dc_stream_release(cur->stream);
4622 
4623 
4624 	__drm_atomic_helper_crtc_destroy_state(state);
4625 
4626 
4627 	kfree(state);
4628 }
4629 
4630 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4631 {
4632 	struct dm_crtc_state *state;
4633 
4634 	if (crtc->state)
4635 		dm_crtc_destroy_state(crtc, crtc->state);
4636 
4637 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4638 	if (WARN_ON(!state))
4639 		return;
4640 
4641 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4642 }
4643 
4644 static struct drm_crtc_state *
4645 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4646 {
4647 	struct dm_crtc_state *state, *cur;
4648 
4649 	cur = to_dm_crtc_state(crtc->state);
4650 
4651 	if (WARN_ON(!crtc->state))
4652 		return NULL;
4653 
4654 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4655 	if (!state)
4656 		return NULL;
4657 
4658 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4659 
4660 	if (cur->stream) {
4661 		state->stream = cur->stream;
4662 		dc_stream_retain(state->stream);
4663 	}
4664 
4665 	state->active_planes = cur->active_planes;
4666 	state->vrr_params = cur->vrr_params;
4667 	state->vrr_infopacket = cur->vrr_infopacket;
4668 	state->abm_level = cur->abm_level;
4669 	state->vrr_supported = cur->vrr_supported;
4670 	state->freesync_config = cur->freesync_config;
4671 	state->crc_src = cur->crc_src;
4672 	state->cm_has_degamma = cur->cm_has_degamma;
4673 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4674 
4675 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4676 
4677 	return &state->base;
4678 }
4679 
4680 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4681 {
4682 	enum dc_irq_source irq_source;
4683 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4684 	struct amdgpu_device *adev = crtc->dev->dev_private;
4685 	int rc;
4686 
4687 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4688 
4689 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4690 
4691 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4692 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4693 	return rc;
4694 }
4695 
4696 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4697 {
4698 	enum dc_irq_source irq_source;
4699 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4700 	struct amdgpu_device *adev = crtc->dev->dev_private;
4701 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4702 	int rc = 0;
4703 
4704 	if (enable) {
4705 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4706 		if (amdgpu_dm_vrr_active(acrtc_state))
4707 			rc = dm_set_vupdate_irq(crtc, true);
4708 	} else {
4709 		/* vblank irq off -> vupdate irq off */
4710 		rc = dm_set_vupdate_irq(crtc, false);
4711 	}
4712 
4713 	if (rc)
4714 		return rc;
4715 
4716 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4717 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4718 }
4719 
4720 static int dm_enable_vblank(struct drm_crtc *crtc)
4721 {
4722 	return dm_set_vblank(crtc, true);
4723 }
4724 
4725 static void dm_disable_vblank(struct drm_crtc *crtc)
4726 {
4727 	dm_set_vblank(crtc, false);
4728 }
4729 
4730 /* Implemented only the options currently availible for the driver */
4731 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4732 	.reset = dm_crtc_reset_state,
4733 	.destroy = amdgpu_dm_crtc_destroy,
4734 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4735 	.set_config = drm_atomic_helper_set_config,
4736 	.page_flip = drm_atomic_helper_page_flip,
4737 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4738 	.atomic_destroy_state = dm_crtc_destroy_state,
4739 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4740 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4741 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4742 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4743 	.enable_vblank = dm_enable_vblank,
4744 	.disable_vblank = dm_disable_vblank,
4745 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4746 };
4747 
4748 static enum drm_connector_status
4749 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4750 {
4751 	bool connected;
4752 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4753 
4754 	/*
4755 	 * Notes:
4756 	 * 1. This interface is NOT called in context of HPD irq.
4757 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4758 	 * makes it a bad place for *any* MST-related activity.
4759 	 */
4760 
4761 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4762 	    !aconnector->fake_enable)
4763 		connected = (aconnector->dc_sink != NULL);
4764 	else
4765 		connected = (aconnector->base.force == DRM_FORCE_ON);
4766 
4767 	return (connected ? connector_status_connected :
4768 			connector_status_disconnected);
4769 }
4770 
4771 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4772 					    struct drm_connector_state *connector_state,
4773 					    struct drm_property *property,
4774 					    uint64_t val)
4775 {
4776 	struct drm_device *dev = connector->dev;
4777 	struct amdgpu_device *adev = dev->dev_private;
4778 	struct dm_connector_state *dm_old_state =
4779 		to_dm_connector_state(connector->state);
4780 	struct dm_connector_state *dm_new_state =
4781 		to_dm_connector_state(connector_state);
4782 
4783 	int ret = -EINVAL;
4784 
4785 	if (property == dev->mode_config.scaling_mode_property) {
4786 		enum amdgpu_rmx_type rmx_type;
4787 
4788 		switch (val) {
4789 		case DRM_MODE_SCALE_CENTER:
4790 			rmx_type = RMX_CENTER;
4791 			break;
4792 		case DRM_MODE_SCALE_ASPECT:
4793 			rmx_type = RMX_ASPECT;
4794 			break;
4795 		case DRM_MODE_SCALE_FULLSCREEN:
4796 			rmx_type = RMX_FULL;
4797 			break;
4798 		case DRM_MODE_SCALE_NONE:
4799 		default:
4800 			rmx_type = RMX_OFF;
4801 			break;
4802 		}
4803 
4804 		if (dm_old_state->scaling == rmx_type)
4805 			return 0;
4806 
4807 		dm_new_state->scaling = rmx_type;
4808 		ret = 0;
4809 	} else if (property == adev->mode_info.underscan_hborder_property) {
4810 		dm_new_state->underscan_hborder = val;
4811 		ret = 0;
4812 	} else if (property == adev->mode_info.underscan_vborder_property) {
4813 		dm_new_state->underscan_vborder = val;
4814 		ret = 0;
4815 	} else if (property == adev->mode_info.underscan_property) {
4816 		dm_new_state->underscan_enable = val;
4817 		ret = 0;
4818 	} else if (property == adev->mode_info.abm_level_property) {
4819 		dm_new_state->abm_level = val;
4820 		ret = 0;
4821 	}
4822 
4823 	return ret;
4824 }
4825 
4826 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4827 					    const struct drm_connector_state *state,
4828 					    struct drm_property *property,
4829 					    uint64_t *val)
4830 {
4831 	struct drm_device *dev = connector->dev;
4832 	struct amdgpu_device *adev = dev->dev_private;
4833 	struct dm_connector_state *dm_state =
4834 		to_dm_connector_state(state);
4835 	int ret = -EINVAL;
4836 
4837 	if (property == dev->mode_config.scaling_mode_property) {
4838 		switch (dm_state->scaling) {
4839 		case RMX_CENTER:
4840 			*val = DRM_MODE_SCALE_CENTER;
4841 			break;
4842 		case RMX_ASPECT:
4843 			*val = DRM_MODE_SCALE_ASPECT;
4844 			break;
4845 		case RMX_FULL:
4846 			*val = DRM_MODE_SCALE_FULLSCREEN;
4847 			break;
4848 		case RMX_OFF:
4849 		default:
4850 			*val = DRM_MODE_SCALE_NONE;
4851 			break;
4852 		}
4853 		ret = 0;
4854 	} else if (property == adev->mode_info.underscan_hborder_property) {
4855 		*val = dm_state->underscan_hborder;
4856 		ret = 0;
4857 	} else if (property == adev->mode_info.underscan_vborder_property) {
4858 		*val = dm_state->underscan_vborder;
4859 		ret = 0;
4860 	} else if (property == adev->mode_info.underscan_property) {
4861 		*val = dm_state->underscan_enable;
4862 		ret = 0;
4863 	} else if (property == adev->mode_info.abm_level_property) {
4864 		*val = dm_state->abm_level;
4865 		ret = 0;
4866 	}
4867 
4868 	return ret;
4869 }
4870 
4871 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4872 {
4873 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4874 
4875 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4876 }
4877 
4878 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4879 {
4880 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4881 	const struct dc_link *link = aconnector->dc_link;
4882 	struct amdgpu_device *adev = connector->dev->dev_private;
4883 	struct amdgpu_display_manager *dm = &adev->dm;
4884 
4885 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4886 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4887 
4888 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4889 	    link->type != dc_connection_none &&
4890 	    dm->backlight_dev) {
4891 		backlight_device_unregister(dm->backlight_dev);
4892 		dm->backlight_dev = NULL;
4893 	}
4894 #endif
4895 
4896 	if (aconnector->dc_em_sink)
4897 		dc_sink_release(aconnector->dc_em_sink);
4898 	aconnector->dc_em_sink = NULL;
4899 	if (aconnector->dc_sink)
4900 		dc_sink_release(aconnector->dc_sink);
4901 	aconnector->dc_sink = NULL;
4902 
4903 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4904 	drm_connector_unregister(connector);
4905 	drm_connector_cleanup(connector);
4906 	if (aconnector->i2c) {
4907 		i2c_del_adapter(&aconnector->i2c->base);
4908 		kfree(aconnector->i2c);
4909 	}
4910 	kfree(aconnector->dm_dp_aux.aux.name);
4911 
4912 	kfree(connector);
4913 }
4914 
4915 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4916 {
4917 	struct dm_connector_state *state =
4918 		to_dm_connector_state(connector->state);
4919 
4920 	if (connector->state)
4921 		__drm_atomic_helper_connector_destroy_state(connector->state);
4922 
4923 	kfree(state);
4924 
4925 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4926 
4927 	if (state) {
4928 		state->scaling = RMX_OFF;
4929 		state->underscan_enable = false;
4930 		state->underscan_hborder = 0;
4931 		state->underscan_vborder = 0;
4932 		state->base.max_requested_bpc = 8;
4933 		state->vcpi_slots = 0;
4934 		state->pbn = 0;
4935 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4936 			state->abm_level = amdgpu_dm_abm_level;
4937 
4938 		__drm_atomic_helper_connector_reset(connector, &state->base);
4939 	}
4940 }
4941 
4942 struct drm_connector_state *
4943 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4944 {
4945 	struct dm_connector_state *state =
4946 		to_dm_connector_state(connector->state);
4947 
4948 	struct dm_connector_state *new_state =
4949 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4950 
4951 	if (!new_state)
4952 		return NULL;
4953 
4954 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4955 
4956 	new_state->freesync_capable = state->freesync_capable;
4957 	new_state->abm_level = state->abm_level;
4958 	new_state->scaling = state->scaling;
4959 	new_state->underscan_enable = state->underscan_enable;
4960 	new_state->underscan_hborder = state->underscan_hborder;
4961 	new_state->underscan_vborder = state->underscan_vborder;
4962 	new_state->vcpi_slots = state->vcpi_slots;
4963 	new_state->pbn = state->pbn;
4964 	return &new_state->base;
4965 }
4966 
4967 static int
4968 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4969 {
4970 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4971 		to_amdgpu_dm_connector(connector);
4972 	int r;
4973 
4974 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4975 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4976 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4977 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4978 		if (r)
4979 			return r;
4980 	}
4981 
4982 #if defined(CONFIG_DEBUG_FS)
4983 	connector_debugfs_init(amdgpu_dm_connector);
4984 #endif
4985 
4986 	return 0;
4987 }
4988 
4989 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4990 	.reset = amdgpu_dm_connector_funcs_reset,
4991 	.detect = amdgpu_dm_connector_detect,
4992 	.fill_modes = drm_helper_probe_single_connector_modes,
4993 	.destroy = amdgpu_dm_connector_destroy,
4994 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4995 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4996 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4997 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4998 	.late_register = amdgpu_dm_connector_late_register,
4999 	.early_unregister = amdgpu_dm_connector_unregister
5000 };
5001 
5002 static int get_modes(struct drm_connector *connector)
5003 {
5004 	return amdgpu_dm_connector_get_modes(connector);
5005 }
5006 
5007 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5008 {
5009 	struct dc_sink_init_data init_params = {
5010 			.link = aconnector->dc_link,
5011 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5012 	};
5013 	struct edid *edid;
5014 
5015 	if (!aconnector->base.edid_blob_ptr) {
5016 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5017 				aconnector->base.name);
5018 
5019 		aconnector->base.force = DRM_FORCE_OFF;
5020 		aconnector->base.override_edid = false;
5021 		return;
5022 	}
5023 
5024 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5025 
5026 	aconnector->edid = edid;
5027 
5028 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5029 		aconnector->dc_link,
5030 		(uint8_t *)edid,
5031 		(edid->extensions + 1) * EDID_LENGTH,
5032 		&init_params);
5033 
5034 	if (aconnector->base.force == DRM_FORCE_ON) {
5035 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5036 		aconnector->dc_link->local_sink :
5037 		aconnector->dc_em_sink;
5038 		dc_sink_retain(aconnector->dc_sink);
5039 	}
5040 }
5041 
5042 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5043 {
5044 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5045 
5046 	/*
5047 	 * In case of headless boot with force on for DP managed connector
5048 	 * Those settings have to be != 0 to get initial modeset
5049 	 */
5050 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5051 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5052 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5053 	}
5054 
5055 
5056 	aconnector->base.override_edid = true;
5057 	create_eml_sink(aconnector);
5058 }
5059 
5060 static struct dc_stream_state *
5061 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5062 				const struct drm_display_mode *drm_mode,
5063 				const struct dm_connector_state *dm_state,
5064 				const struct dc_stream_state *old_stream)
5065 {
5066 	struct drm_connector *connector = &aconnector->base;
5067 	struct amdgpu_device *adev = connector->dev->dev_private;
5068 	struct dc_stream_state *stream;
5069 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5070 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5071 	enum dc_status dc_result = DC_OK;
5072 
5073 	do {
5074 		stream = create_stream_for_sink(aconnector, drm_mode,
5075 						dm_state, old_stream,
5076 						requested_bpc);
5077 		if (stream == NULL) {
5078 			DRM_ERROR("Failed to create stream for sink!\n");
5079 			break;
5080 		}
5081 
5082 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5083 
5084 		if (dc_result != DC_OK) {
5085 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5086 				      drm_mode->hdisplay,
5087 				      drm_mode->vdisplay,
5088 				      drm_mode->clock,
5089 				      dc_result,
5090 				      dc_status_to_str(dc_result));
5091 
5092 			dc_stream_release(stream);
5093 			stream = NULL;
5094 			requested_bpc -= 2; /* lower bpc to retry validation */
5095 		}
5096 
5097 	} while (stream == NULL && requested_bpc >= 6);
5098 
5099 	return stream;
5100 }
5101 
5102 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5103 				   struct drm_display_mode *mode)
5104 {
5105 	int result = MODE_ERROR;
5106 	struct dc_sink *dc_sink;
5107 	/* TODO: Unhardcode stream count */
5108 	struct dc_stream_state *stream;
5109 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5110 
5111 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5112 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5113 		return result;
5114 
5115 	/*
5116 	 * Only run this the first time mode_valid is called to initilialize
5117 	 * EDID mgmt
5118 	 */
5119 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5120 		!aconnector->dc_em_sink)
5121 		handle_edid_mgmt(aconnector);
5122 
5123 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5124 
5125 	if (dc_sink == NULL) {
5126 		DRM_ERROR("dc_sink is NULL!\n");
5127 		goto fail;
5128 	}
5129 
5130 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5131 	if (stream) {
5132 		dc_stream_release(stream);
5133 		result = MODE_OK;
5134 	}
5135 
5136 fail:
5137 	/* TODO: error handling*/
5138 	return result;
5139 }
5140 
5141 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5142 				struct dc_info_packet *out)
5143 {
5144 	struct hdmi_drm_infoframe frame;
5145 	unsigned char buf[30]; /* 26 + 4 */
5146 	ssize_t len;
5147 	int ret, i;
5148 
5149 	memset(out, 0, sizeof(*out));
5150 
5151 	if (!state->hdr_output_metadata)
5152 		return 0;
5153 
5154 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5155 	if (ret)
5156 		return ret;
5157 
5158 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5159 	if (len < 0)
5160 		return (int)len;
5161 
5162 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5163 	if (len != 30)
5164 		return -EINVAL;
5165 
5166 	/* Prepare the infopacket for DC. */
5167 	switch (state->connector->connector_type) {
5168 	case DRM_MODE_CONNECTOR_HDMIA:
5169 		out->hb0 = 0x87; /* type */
5170 		out->hb1 = 0x01; /* version */
5171 		out->hb2 = 0x1A; /* length */
5172 		out->sb[0] = buf[3]; /* checksum */
5173 		i = 1;
5174 		break;
5175 
5176 	case DRM_MODE_CONNECTOR_DisplayPort:
5177 	case DRM_MODE_CONNECTOR_eDP:
5178 		out->hb0 = 0x00; /* sdp id, zero */
5179 		out->hb1 = 0x87; /* type */
5180 		out->hb2 = 0x1D; /* payload len - 1 */
5181 		out->hb3 = (0x13 << 2); /* sdp version */
5182 		out->sb[0] = 0x01; /* version */
5183 		out->sb[1] = 0x1A; /* length */
5184 		i = 2;
5185 		break;
5186 
5187 	default:
5188 		return -EINVAL;
5189 	}
5190 
5191 	memcpy(&out->sb[i], &buf[4], 26);
5192 	out->valid = true;
5193 
5194 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5195 		       sizeof(out->sb), false);
5196 
5197 	return 0;
5198 }
5199 
5200 static bool
5201 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5202 			  const struct drm_connector_state *new_state)
5203 {
5204 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5205 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5206 
5207 	if (old_blob != new_blob) {
5208 		if (old_blob && new_blob &&
5209 		    old_blob->length == new_blob->length)
5210 			return memcmp(old_blob->data, new_blob->data,
5211 				      old_blob->length);
5212 
5213 		return true;
5214 	}
5215 
5216 	return false;
5217 }
5218 
5219 static int
5220 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5221 				 struct drm_atomic_state *state)
5222 {
5223 	struct drm_connector_state *new_con_state =
5224 		drm_atomic_get_new_connector_state(state, conn);
5225 	struct drm_connector_state *old_con_state =
5226 		drm_atomic_get_old_connector_state(state, conn);
5227 	struct drm_crtc *crtc = new_con_state->crtc;
5228 	struct drm_crtc_state *new_crtc_state;
5229 	int ret;
5230 
5231 	if (!crtc)
5232 		return 0;
5233 
5234 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5235 		struct dc_info_packet hdr_infopacket;
5236 
5237 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5238 		if (ret)
5239 			return ret;
5240 
5241 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5242 		if (IS_ERR(new_crtc_state))
5243 			return PTR_ERR(new_crtc_state);
5244 
5245 		/*
5246 		 * DC considers the stream backends changed if the
5247 		 * static metadata changes. Forcing the modeset also
5248 		 * gives a simple way for userspace to switch from
5249 		 * 8bpc to 10bpc when setting the metadata to enter
5250 		 * or exit HDR.
5251 		 *
5252 		 * Changing the static metadata after it's been
5253 		 * set is permissible, however. So only force a
5254 		 * modeset if we're entering or exiting HDR.
5255 		 */
5256 		new_crtc_state->mode_changed =
5257 			!old_con_state->hdr_output_metadata ||
5258 			!new_con_state->hdr_output_metadata;
5259 	}
5260 
5261 	return 0;
5262 }
5263 
5264 static const struct drm_connector_helper_funcs
5265 amdgpu_dm_connector_helper_funcs = {
5266 	/*
5267 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5268 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5269 	 * are missing after user start lightdm. So we need to renew modes list.
5270 	 * in get_modes call back, not just return the modes count
5271 	 */
5272 	.get_modes = get_modes,
5273 	.mode_valid = amdgpu_dm_connector_mode_valid,
5274 	.atomic_check = amdgpu_dm_connector_atomic_check,
5275 };
5276 
5277 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5278 {
5279 }
5280 
5281 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5282 {
5283 	struct drm_device *dev = new_crtc_state->crtc->dev;
5284 	struct drm_plane *plane;
5285 
5286 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5287 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5288 			return true;
5289 	}
5290 
5291 	return false;
5292 }
5293 
5294 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5295 {
5296 	struct drm_atomic_state *state = new_crtc_state->state;
5297 	struct drm_plane *plane;
5298 	int num_active = 0;
5299 
5300 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5301 		struct drm_plane_state *new_plane_state;
5302 
5303 		/* Cursor planes are "fake". */
5304 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5305 			continue;
5306 
5307 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5308 
5309 		if (!new_plane_state) {
5310 			/*
5311 			 * The plane is enable on the CRTC and hasn't changed
5312 			 * state. This means that it previously passed
5313 			 * validation and is therefore enabled.
5314 			 */
5315 			num_active += 1;
5316 			continue;
5317 		}
5318 
5319 		/* We need a framebuffer to be considered enabled. */
5320 		num_active += (new_plane_state->fb != NULL);
5321 	}
5322 
5323 	return num_active;
5324 }
5325 
5326 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5327 					 struct drm_crtc_state *new_crtc_state)
5328 {
5329 	struct dm_crtc_state *dm_new_crtc_state =
5330 		to_dm_crtc_state(new_crtc_state);
5331 
5332 	dm_new_crtc_state->active_planes = 0;
5333 
5334 	if (!dm_new_crtc_state->stream)
5335 		return;
5336 
5337 	dm_new_crtc_state->active_planes =
5338 		count_crtc_active_planes(new_crtc_state);
5339 }
5340 
5341 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5342 				       struct drm_crtc_state *state)
5343 {
5344 	struct amdgpu_device *adev = crtc->dev->dev_private;
5345 	struct dc *dc = adev->dm.dc;
5346 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5347 	int ret = -EINVAL;
5348 
5349 	dm_update_crtc_active_planes(crtc, state);
5350 
5351 	if (unlikely(!dm_crtc_state->stream &&
5352 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5353 		WARN_ON(1);
5354 		return ret;
5355 	}
5356 
5357 	/* In some use cases, like reset, no stream is attached */
5358 	if (!dm_crtc_state->stream)
5359 		return 0;
5360 
5361 	/*
5362 	 * We want at least one hardware plane enabled to use
5363 	 * the stream with a cursor enabled.
5364 	 */
5365 	if (state->enable && state->active &&
5366 	    does_crtc_have_active_cursor(state) &&
5367 	    dm_crtc_state->active_planes == 0)
5368 		return -EINVAL;
5369 
5370 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5371 		return 0;
5372 
5373 	return ret;
5374 }
5375 
5376 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5377 				      const struct drm_display_mode *mode,
5378 				      struct drm_display_mode *adjusted_mode)
5379 {
5380 	return true;
5381 }
5382 
5383 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5384 	.disable = dm_crtc_helper_disable,
5385 	.atomic_check = dm_crtc_helper_atomic_check,
5386 	.mode_fixup = dm_crtc_helper_mode_fixup,
5387 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5388 };
5389 
5390 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5391 {
5392 
5393 }
5394 
5395 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5396 {
5397 	switch (display_color_depth) {
5398 		case COLOR_DEPTH_666:
5399 			return 6;
5400 		case COLOR_DEPTH_888:
5401 			return 8;
5402 		case COLOR_DEPTH_101010:
5403 			return 10;
5404 		case COLOR_DEPTH_121212:
5405 			return 12;
5406 		case COLOR_DEPTH_141414:
5407 			return 14;
5408 		case COLOR_DEPTH_161616:
5409 			return 16;
5410 		default:
5411 			break;
5412 		}
5413 	return 0;
5414 }
5415 
5416 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5417 					  struct drm_crtc_state *crtc_state,
5418 					  struct drm_connector_state *conn_state)
5419 {
5420 	struct drm_atomic_state *state = crtc_state->state;
5421 	struct drm_connector *connector = conn_state->connector;
5422 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5423 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5424 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5425 	struct drm_dp_mst_topology_mgr *mst_mgr;
5426 	struct drm_dp_mst_port *mst_port;
5427 	enum dc_color_depth color_depth;
5428 	int clock, bpp = 0;
5429 	bool is_y420 = false;
5430 
5431 	if (!aconnector->port || !aconnector->dc_sink)
5432 		return 0;
5433 
5434 	mst_port = aconnector->port;
5435 	mst_mgr = &aconnector->mst_port->mst_mgr;
5436 
5437 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5438 		return 0;
5439 
5440 	if (!state->duplicated) {
5441 		int max_bpc = conn_state->max_requested_bpc;
5442 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5443 				aconnector->force_yuv420_output;
5444 		color_depth = convert_color_depth_from_display_info(connector,
5445 								    is_y420,
5446 								    max_bpc);
5447 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5448 		clock = adjusted_mode->clock;
5449 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5450 	}
5451 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5452 									   mst_mgr,
5453 									   mst_port,
5454 									   dm_new_connector_state->pbn,
5455 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5456 	if (dm_new_connector_state->vcpi_slots < 0) {
5457 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5458 		return dm_new_connector_state->vcpi_slots;
5459 	}
5460 	return 0;
5461 }
5462 
5463 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5464 	.disable = dm_encoder_helper_disable,
5465 	.atomic_check = dm_encoder_helper_atomic_check
5466 };
5467 
5468 #if defined(CONFIG_DRM_AMD_DC_DCN)
5469 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5470 					    struct dc_state *dc_state)
5471 {
5472 	struct dc_stream_state *stream = NULL;
5473 	struct drm_connector *connector;
5474 	struct drm_connector_state *new_con_state, *old_con_state;
5475 	struct amdgpu_dm_connector *aconnector;
5476 	struct dm_connector_state *dm_conn_state;
5477 	int i, j, clock, bpp;
5478 	int vcpi, pbn_div, pbn = 0;
5479 
5480 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5481 
5482 		aconnector = to_amdgpu_dm_connector(connector);
5483 
5484 		if (!aconnector->port)
5485 			continue;
5486 
5487 		if (!new_con_state || !new_con_state->crtc)
5488 			continue;
5489 
5490 		dm_conn_state = to_dm_connector_state(new_con_state);
5491 
5492 		for (j = 0; j < dc_state->stream_count; j++) {
5493 			stream = dc_state->streams[j];
5494 			if (!stream)
5495 				continue;
5496 
5497 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5498 				break;
5499 
5500 			stream = NULL;
5501 		}
5502 
5503 		if (!stream)
5504 			continue;
5505 
5506 		if (stream->timing.flags.DSC != 1) {
5507 			drm_dp_mst_atomic_enable_dsc(state,
5508 						     aconnector->port,
5509 						     dm_conn_state->pbn,
5510 						     0,
5511 						     false);
5512 			continue;
5513 		}
5514 
5515 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5516 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5517 		clock = stream->timing.pix_clk_100hz / 10;
5518 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5519 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5520 						    aconnector->port,
5521 						    pbn, pbn_div,
5522 						    true);
5523 		if (vcpi < 0)
5524 			return vcpi;
5525 
5526 		dm_conn_state->pbn = pbn;
5527 		dm_conn_state->vcpi_slots = vcpi;
5528 	}
5529 	return 0;
5530 }
5531 #endif
5532 
5533 static void dm_drm_plane_reset(struct drm_plane *plane)
5534 {
5535 	struct dm_plane_state *amdgpu_state = NULL;
5536 
5537 	if (plane->state)
5538 		plane->funcs->atomic_destroy_state(plane, plane->state);
5539 
5540 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5541 	WARN_ON(amdgpu_state == NULL);
5542 
5543 	if (amdgpu_state)
5544 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5545 }
5546 
5547 static struct drm_plane_state *
5548 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5549 {
5550 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5551 
5552 	old_dm_plane_state = to_dm_plane_state(plane->state);
5553 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5554 	if (!dm_plane_state)
5555 		return NULL;
5556 
5557 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5558 
5559 	if (old_dm_plane_state->dc_state) {
5560 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5561 		dc_plane_state_retain(dm_plane_state->dc_state);
5562 	}
5563 
5564 	return &dm_plane_state->base;
5565 }
5566 
5567 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5568 				struct drm_plane_state *state)
5569 {
5570 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5571 
5572 	if (dm_plane_state->dc_state)
5573 		dc_plane_state_release(dm_plane_state->dc_state);
5574 
5575 	drm_atomic_helper_plane_destroy_state(plane, state);
5576 }
5577 
5578 static const struct drm_plane_funcs dm_plane_funcs = {
5579 	.update_plane	= drm_atomic_helper_update_plane,
5580 	.disable_plane	= drm_atomic_helper_disable_plane,
5581 	.destroy	= drm_primary_helper_destroy,
5582 	.reset = dm_drm_plane_reset,
5583 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5584 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5585 };
5586 
5587 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5588 				      struct drm_plane_state *new_state)
5589 {
5590 	struct amdgpu_framebuffer *afb;
5591 	struct drm_gem_object *obj;
5592 	struct amdgpu_device *adev;
5593 	struct amdgpu_bo *rbo;
5594 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5595 	struct list_head list;
5596 	struct ttm_validate_buffer tv;
5597 	struct ww_acquire_ctx ticket;
5598 	uint64_t tiling_flags;
5599 	uint32_t domain;
5600 	int r;
5601 	bool tmz_surface = false;
5602 	bool force_disable_dcc = false;
5603 
5604 	dm_plane_state_old = to_dm_plane_state(plane->state);
5605 	dm_plane_state_new = to_dm_plane_state(new_state);
5606 
5607 	if (!new_state->fb) {
5608 		DRM_DEBUG_DRIVER("No FB bound\n");
5609 		return 0;
5610 	}
5611 
5612 	afb = to_amdgpu_framebuffer(new_state->fb);
5613 	obj = new_state->fb->obj[0];
5614 	rbo = gem_to_amdgpu_bo(obj);
5615 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5616 	INIT_LIST_HEAD(&list);
5617 
5618 	tv.bo = &rbo->tbo;
5619 	tv.num_shared = 1;
5620 	list_add(&tv.head, &list);
5621 
5622 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5623 	if (r) {
5624 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5625 		return r;
5626 	}
5627 
5628 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5629 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5630 	else
5631 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5632 
5633 	r = amdgpu_bo_pin(rbo, domain);
5634 	if (unlikely(r != 0)) {
5635 		if (r != -ERESTARTSYS)
5636 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5637 		ttm_eu_backoff_reservation(&ticket, &list);
5638 		return r;
5639 	}
5640 
5641 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5642 	if (unlikely(r != 0)) {
5643 		amdgpu_bo_unpin(rbo);
5644 		ttm_eu_backoff_reservation(&ticket, &list);
5645 		DRM_ERROR("%p bind failed\n", rbo);
5646 		return r;
5647 	}
5648 
5649 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5650 
5651 	tmz_surface = amdgpu_bo_encrypted(rbo);
5652 
5653 	ttm_eu_backoff_reservation(&ticket, &list);
5654 
5655 	afb->address = amdgpu_bo_gpu_offset(rbo);
5656 
5657 	amdgpu_bo_ref(rbo);
5658 
5659 	if (dm_plane_state_new->dc_state &&
5660 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5661 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5662 
5663 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5664 		fill_plane_buffer_attributes(
5665 			adev, afb, plane_state->format, plane_state->rotation,
5666 			tiling_flags, &plane_state->tiling_info,
5667 			&plane_state->plane_size, &plane_state->dcc,
5668 			&plane_state->address, tmz_surface,
5669 			force_disable_dcc);
5670 	}
5671 
5672 	return 0;
5673 }
5674 
5675 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5676 				       struct drm_plane_state *old_state)
5677 {
5678 	struct amdgpu_bo *rbo;
5679 	int r;
5680 
5681 	if (!old_state->fb)
5682 		return;
5683 
5684 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5685 	r = amdgpu_bo_reserve(rbo, false);
5686 	if (unlikely(r)) {
5687 		DRM_ERROR("failed to reserve rbo before unpin\n");
5688 		return;
5689 	}
5690 
5691 	amdgpu_bo_unpin(rbo);
5692 	amdgpu_bo_unreserve(rbo);
5693 	amdgpu_bo_unref(&rbo);
5694 }
5695 
5696 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5697 				       struct drm_crtc_state *new_crtc_state)
5698 {
5699 	int max_downscale = 0;
5700 	int max_upscale = INT_MAX;
5701 
5702 	/* TODO: These should be checked against DC plane caps */
5703 	return drm_atomic_helper_check_plane_state(
5704 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5705 }
5706 
5707 static int dm_plane_atomic_check(struct drm_plane *plane,
5708 				 struct drm_plane_state *state)
5709 {
5710 	struct amdgpu_device *adev = plane->dev->dev_private;
5711 	struct dc *dc = adev->dm.dc;
5712 	struct dm_plane_state *dm_plane_state;
5713 	struct dc_scaling_info scaling_info;
5714 	struct drm_crtc_state *new_crtc_state;
5715 	int ret;
5716 
5717 	dm_plane_state = to_dm_plane_state(state);
5718 
5719 	if (!dm_plane_state->dc_state)
5720 		return 0;
5721 
5722 	new_crtc_state =
5723 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5724 	if (!new_crtc_state)
5725 		return -EINVAL;
5726 
5727 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5728 	if (ret)
5729 		return ret;
5730 
5731 	ret = fill_dc_scaling_info(state, &scaling_info);
5732 	if (ret)
5733 		return ret;
5734 
5735 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5736 		return 0;
5737 
5738 	return -EINVAL;
5739 }
5740 
5741 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5742 				       struct drm_plane_state *new_plane_state)
5743 {
5744 	/* Only support async updates on cursor planes. */
5745 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5746 		return -EINVAL;
5747 
5748 	return 0;
5749 }
5750 
5751 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5752 					 struct drm_plane_state *new_state)
5753 {
5754 	struct drm_plane_state *old_state =
5755 		drm_atomic_get_old_plane_state(new_state->state, plane);
5756 
5757 	swap(plane->state->fb, new_state->fb);
5758 
5759 	plane->state->src_x = new_state->src_x;
5760 	plane->state->src_y = new_state->src_y;
5761 	plane->state->src_w = new_state->src_w;
5762 	plane->state->src_h = new_state->src_h;
5763 	plane->state->crtc_x = new_state->crtc_x;
5764 	plane->state->crtc_y = new_state->crtc_y;
5765 	plane->state->crtc_w = new_state->crtc_w;
5766 	plane->state->crtc_h = new_state->crtc_h;
5767 
5768 	handle_cursor_update(plane, old_state);
5769 }
5770 
5771 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5772 	.prepare_fb = dm_plane_helper_prepare_fb,
5773 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5774 	.atomic_check = dm_plane_atomic_check,
5775 	.atomic_async_check = dm_plane_atomic_async_check,
5776 	.atomic_async_update = dm_plane_atomic_async_update
5777 };
5778 
5779 /*
5780  * TODO: these are currently initialized to rgb formats only.
5781  * For future use cases we should either initialize them dynamically based on
5782  * plane capabilities, or initialize this array to all formats, so internal drm
5783  * check will succeed, and let DC implement proper check
5784  */
5785 static const uint32_t rgb_formats[] = {
5786 	DRM_FORMAT_XRGB8888,
5787 	DRM_FORMAT_ARGB8888,
5788 	DRM_FORMAT_RGBA8888,
5789 	DRM_FORMAT_XRGB2101010,
5790 	DRM_FORMAT_XBGR2101010,
5791 	DRM_FORMAT_ARGB2101010,
5792 	DRM_FORMAT_ABGR2101010,
5793 	DRM_FORMAT_XBGR8888,
5794 	DRM_FORMAT_ABGR8888,
5795 	DRM_FORMAT_RGB565,
5796 };
5797 
5798 static const uint32_t overlay_formats[] = {
5799 	DRM_FORMAT_XRGB8888,
5800 	DRM_FORMAT_ARGB8888,
5801 	DRM_FORMAT_RGBA8888,
5802 	DRM_FORMAT_XBGR8888,
5803 	DRM_FORMAT_ABGR8888,
5804 	DRM_FORMAT_RGB565
5805 };
5806 
5807 static const u32 cursor_formats[] = {
5808 	DRM_FORMAT_ARGB8888
5809 };
5810 
5811 static int get_plane_formats(const struct drm_plane *plane,
5812 			     const struct dc_plane_cap *plane_cap,
5813 			     uint32_t *formats, int max_formats)
5814 {
5815 	int i, num_formats = 0;
5816 
5817 	/*
5818 	 * TODO: Query support for each group of formats directly from
5819 	 * DC plane caps. This will require adding more formats to the
5820 	 * caps list.
5821 	 */
5822 
5823 	switch (plane->type) {
5824 	case DRM_PLANE_TYPE_PRIMARY:
5825 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5826 			if (num_formats >= max_formats)
5827 				break;
5828 
5829 			formats[num_formats++] = rgb_formats[i];
5830 		}
5831 
5832 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5833 			formats[num_formats++] = DRM_FORMAT_NV12;
5834 		if (plane_cap && plane_cap->pixel_format_support.p010)
5835 			formats[num_formats++] = DRM_FORMAT_P010;
5836 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5837 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5838 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5839 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5840 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5841 		}
5842 		break;
5843 
5844 	case DRM_PLANE_TYPE_OVERLAY:
5845 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5846 			if (num_formats >= max_formats)
5847 				break;
5848 
5849 			formats[num_formats++] = overlay_formats[i];
5850 		}
5851 		break;
5852 
5853 	case DRM_PLANE_TYPE_CURSOR:
5854 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5855 			if (num_formats >= max_formats)
5856 				break;
5857 
5858 			formats[num_formats++] = cursor_formats[i];
5859 		}
5860 		break;
5861 	}
5862 
5863 	return num_formats;
5864 }
5865 
5866 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5867 				struct drm_plane *plane,
5868 				unsigned long possible_crtcs,
5869 				const struct dc_plane_cap *plane_cap)
5870 {
5871 	uint32_t formats[32];
5872 	int num_formats;
5873 	int res = -EPERM;
5874 	unsigned int supported_rotations;
5875 
5876 	num_formats = get_plane_formats(plane, plane_cap, formats,
5877 					ARRAY_SIZE(formats));
5878 
5879 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5880 				       &dm_plane_funcs, formats, num_formats,
5881 				       NULL, plane->type, NULL);
5882 	if (res)
5883 		return res;
5884 
5885 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5886 	    plane_cap && plane_cap->per_pixel_alpha) {
5887 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5888 					  BIT(DRM_MODE_BLEND_PREMULTI);
5889 
5890 		drm_plane_create_alpha_property(plane);
5891 		drm_plane_create_blend_mode_property(plane, blend_caps);
5892 	}
5893 
5894 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5895 	    plane_cap &&
5896 	    (plane_cap->pixel_format_support.nv12 ||
5897 	     plane_cap->pixel_format_support.p010)) {
5898 		/* This only affects YUV formats. */
5899 		drm_plane_create_color_properties(
5900 			plane,
5901 			BIT(DRM_COLOR_YCBCR_BT601) |
5902 			BIT(DRM_COLOR_YCBCR_BT709) |
5903 			BIT(DRM_COLOR_YCBCR_BT2020),
5904 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5905 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5906 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5907 	}
5908 
5909 	supported_rotations =
5910 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5911 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5912 
5913 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5914 					   supported_rotations);
5915 
5916 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5917 
5918 	/* Create (reset) the plane state */
5919 	if (plane->funcs->reset)
5920 		plane->funcs->reset(plane);
5921 
5922 	return 0;
5923 }
5924 
5925 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5926 			       struct drm_plane *plane,
5927 			       uint32_t crtc_index)
5928 {
5929 	struct amdgpu_crtc *acrtc = NULL;
5930 	struct drm_plane *cursor_plane;
5931 
5932 	int res = -ENOMEM;
5933 
5934 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5935 	if (!cursor_plane)
5936 		goto fail;
5937 
5938 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5939 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5940 
5941 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5942 	if (!acrtc)
5943 		goto fail;
5944 
5945 	res = drm_crtc_init_with_planes(
5946 			dm->ddev,
5947 			&acrtc->base,
5948 			plane,
5949 			cursor_plane,
5950 			&amdgpu_dm_crtc_funcs, NULL);
5951 
5952 	if (res)
5953 		goto fail;
5954 
5955 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5956 
5957 	/* Create (reset) the plane state */
5958 	if (acrtc->base.funcs->reset)
5959 		acrtc->base.funcs->reset(&acrtc->base);
5960 
5961 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5962 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5963 
5964 	acrtc->crtc_id = crtc_index;
5965 	acrtc->base.enabled = false;
5966 	acrtc->otg_inst = -1;
5967 
5968 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5969 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5970 				   true, MAX_COLOR_LUT_ENTRIES);
5971 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5972 
5973 	return 0;
5974 
5975 fail:
5976 	kfree(acrtc);
5977 	kfree(cursor_plane);
5978 	return res;
5979 }
5980 
5981 
5982 static int to_drm_connector_type(enum signal_type st)
5983 {
5984 	switch (st) {
5985 	case SIGNAL_TYPE_HDMI_TYPE_A:
5986 		return DRM_MODE_CONNECTOR_HDMIA;
5987 	case SIGNAL_TYPE_EDP:
5988 		return DRM_MODE_CONNECTOR_eDP;
5989 	case SIGNAL_TYPE_LVDS:
5990 		return DRM_MODE_CONNECTOR_LVDS;
5991 	case SIGNAL_TYPE_RGB:
5992 		return DRM_MODE_CONNECTOR_VGA;
5993 	case SIGNAL_TYPE_DISPLAY_PORT:
5994 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5995 		return DRM_MODE_CONNECTOR_DisplayPort;
5996 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5997 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5998 		return DRM_MODE_CONNECTOR_DVID;
5999 	case SIGNAL_TYPE_VIRTUAL:
6000 		return DRM_MODE_CONNECTOR_VIRTUAL;
6001 
6002 	default:
6003 		return DRM_MODE_CONNECTOR_Unknown;
6004 	}
6005 }
6006 
6007 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6008 {
6009 	struct drm_encoder *encoder;
6010 
6011 	/* There is only one encoder per connector */
6012 	drm_connector_for_each_possible_encoder(connector, encoder)
6013 		return encoder;
6014 
6015 	return NULL;
6016 }
6017 
6018 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6019 {
6020 	struct drm_encoder *encoder;
6021 	struct amdgpu_encoder *amdgpu_encoder;
6022 
6023 	encoder = amdgpu_dm_connector_to_encoder(connector);
6024 
6025 	if (encoder == NULL)
6026 		return;
6027 
6028 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6029 
6030 	amdgpu_encoder->native_mode.clock = 0;
6031 
6032 	if (!list_empty(&connector->probed_modes)) {
6033 		struct drm_display_mode *preferred_mode = NULL;
6034 
6035 		list_for_each_entry(preferred_mode,
6036 				    &connector->probed_modes,
6037 				    head) {
6038 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6039 				amdgpu_encoder->native_mode = *preferred_mode;
6040 
6041 			break;
6042 		}
6043 
6044 	}
6045 }
6046 
6047 static struct drm_display_mode *
6048 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6049 			     char *name,
6050 			     int hdisplay, int vdisplay)
6051 {
6052 	struct drm_device *dev = encoder->dev;
6053 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6054 	struct drm_display_mode *mode = NULL;
6055 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6056 
6057 	mode = drm_mode_duplicate(dev, native_mode);
6058 
6059 	if (mode == NULL)
6060 		return NULL;
6061 
6062 	mode->hdisplay = hdisplay;
6063 	mode->vdisplay = vdisplay;
6064 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6065 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6066 
6067 	return mode;
6068 
6069 }
6070 
6071 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6072 						 struct drm_connector *connector)
6073 {
6074 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6075 	struct drm_display_mode *mode = NULL;
6076 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6077 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6078 				to_amdgpu_dm_connector(connector);
6079 	int i;
6080 	int n;
6081 	struct mode_size {
6082 		char name[DRM_DISPLAY_MODE_LEN];
6083 		int w;
6084 		int h;
6085 	} common_modes[] = {
6086 		{  "640x480",  640,  480},
6087 		{  "800x600",  800,  600},
6088 		{ "1024x768", 1024,  768},
6089 		{ "1280x720", 1280,  720},
6090 		{ "1280x800", 1280,  800},
6091 		{"1280x1024", 1280, 1024},
6092 		{ "1440x900", 1440,  900},
6093 		{"1680x1050", 1680, 1050},
6094 		{"1600x1200", 1600, 1200},
6095 		{"1920x1080", 1920, 1080},
6096 		{"1920x1200", 1920, 1200}
6097 	};
6098 
6099 	n = ARRAY_SIZE(common_modes);
6100 
6101 	for (i = 0; i < n; i++) {
6102 		struct drm_display_mode *curmode = NULL;
6103 		bool mode_existed = false;
6104 
6105 		if (common_modes[i].w > native_mode->hdisplay ||
6106 		    common_modes[i].h > native_mode->vdisplay ||
6107 		   (common_modes[i].w == native_mode->hdisplay &&
6108 		    common_modes[i].h == native_mode->vdisplay))
6109 			continue;
6110 
6111 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6112 			if (common_modes[i].w == curmode->hdisplay &&
6113 			    common_modes[i].h == curmode->vdisplay) {
6114 				mode_existed = true;
6115 				break;
6116 			}
6117 		}
6118 
6119 		if (mode_existed)
6120 			continue;
6121 
6122 		mode = amdgpu_dm_create_common_mode(encoder,
6123 				common_modes[i].name, common_modes[i].w,
6124 				common_modes[i].h);
6125 		drm_mode_probed_add(connector, mode);
6126 		amdgpu_dm_connector->num_modes++;
6127 	}
6128 }
6129 
6130 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6131 					      struct edid *edid)
6132 {
6133 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6134 			to_amdgpu_dm_connector(connector);
6135 
6136 	if (edid) {
6137 		/* empty probed_modes */
6138 		INIT_LIST_HEAD(&connector->probed_modes);
6139 		amdgpu_dm_connector->num_modes =
6140 				drm_add_edid_modes(connector, edid);
6141 
6142 		/* sorting the probed modes before calling function
6143 		 * amdgpu_dm_get_native_mode() since EDID can have
6144 		 * more than one preferred mode. The modes that are
6145 		 * later in the probed mode list could be of higher
6146 		 * and preferred resolution. For example, 3840x2160
6147 		 * resolution in base EDID preferred timing and 4096x2160
6148 		 * preferred resolution in DID extension block later.
6149 		 */
6150 		drm_mode_sort(&connector->probed_modes);
6151 		amdgpu_dm_get_native_mode(connector);
6152 	} else {
6153 		amdgpu_dm_connector->num_modes = 0;
6154 	}
6155 }
6156 
6157 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6158 {
6159 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6160 			to_amdgpu_dm_connector(connector);
6161 	struct drm_encoder *encoder;
6162 	struct edid *edid = amdgpu_dm_connector->edid;
6163 
6164 	encoder = amdgpu_dm_connector_to_encoder(connector);
6165 
6166 	if (!edid || !drm_edid_is_valid(edid)) {
6167 		amdgpu_dm_connector->num_modes =
6168 				drm_add_modes_noedid(connector, 640, 480);
6169 	} else {
6170 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6171 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6172 	}
6173 	amdgpu_dm_fbc_init(connector);
6174 
6175 	return amdgpu_dm_connector->num_modes;
6176 }
6177 
6178 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6179 				     struct amdgpu_dm_connector *aconnector,
6180 				     int connector_type,
6181 				     struct dc_link *link,
6182 				     int link_index)
6183 {
6184 	struct amdgpu_device *adev = dm->ddev->dev_private;
6185 
6186 	/*
6187 	 * Some of the properties below require access to state, like bpc.
6188 	 * Allocate some default initial connector state with our reset helper.
6189 	 */
6190 	if (aconnector->base.funcs->reset)
6191 		aconnector->base.funcs->reset(&aconnector->base);
6192 
6193 	aconnector->connector_id = link_index;
6194 	aconnector->dc_link = link;
6195 	aconnector->base.interlace_allowed = false;
6196 	aconnector->base.doublescan_allowed = false;
6197 	aconnector->base.stereo_allowed = false;
6198 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6199 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6200 	aconnector->audio_inst = -1;
6201 	mutex_init(&aconnector->hpd_lock);
6202 
6203 	/*
6204 	 * configure support HPD hot plug connector_>polled default value is 0
6205 	 * which means HPD hot plug not supported
6206 	 */
6207 	switch (connector_type) {
6208 	case DRM_MODE_CONNECTOR_HDMIA:
6209 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6210 		aconnector->base.ycbcr_420_allowed =
6211 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6212 		break;
6213 	case DRM_MODE_CONNECTOR_DisplayPort:
6214 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6215 		aconnector->base.ycbcr_420_allowed =
6216 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6217 		break;
6218 	case DRM_MODE_CONNECTOR_DVID:
6219 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6220 		break;
6221 	default:
6222 		break;
6223 	}
6224 
6225 	drm_object_attach_property(&aconnector->base.base,
6226 				dm->ddev->mode_config.scaling_mode_property,
6227 				DRM_MODE_SCALE_NONE);
6228 
6229 	drm_object_attach_property(&aconnector->base.base,
6230 				adev->mode_info.underscan_property,
6231 				UNDERSCAN_OFF);
6232 	drm_object_attach_property(&aconnector->base.base,
6233 				adev->mode_info.underscan_hborder_property,
6234 				0);
6235 	drm_object_attach_property(&aconnector->base.base,
6236 				adev->mode_info.underscan_vborder_property,
6237 				0);
6238 
6239 	if (!aconnector->mst_port)
6240 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6241 
6242 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6243 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6244 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6245 
6246 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6247 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6248 		drm_object_attach_property(&aconnector->base.base,
6249 				adev->mode_info.abm_level_property, 0);
6250 	}
6251 
6252 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6253 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6254 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6255 		drm_object_attach_property(
6256 			&aconnector->base.base,
6257 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6258 
6259 		if (!aconnector->mst_port)
6260 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6261 
6262 #ifdef CONFIG_DRM_AMD_DC_HDCP
6263 		if (adev->dm.hdcp_workqueue)
6264 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6265 #endif
6266 	}
6267 }
6268 
6269 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6270 			      struct i2c_msg *msgs, int num)
6271 {
6272 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6273 	struct ddc_service *ddc_service = i2c->ddc_service;
6274 	struct i2c_command cmd;
6275 	int i;
6276 	int result = -EIO;
6277 
6278 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6279 
6280 	if (!cmd.payloads)
6281 		return result;
6282 
6283 	cmd.number_of_payloads = num;
6284 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6285 	cmd.speed = 100;
6286 
6287 	for (i = 0; i < num; i++) {
6288 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6289 		cmd.payloads[i].address = msgs[i].addr;
6290 		cmd.payloads[i].length = msgs[i].len;
6291 		cmd.payloads[i].data = msgs[i].buf;
6292 	}
6293 
6294 	if (dc_submit_i2c(
6295 			ddc_service->ctx->dc,
6296 			ddc_service->ddc_pin->hw_info.ddc_channel,
6297 			&cmd))
6298 		result = num;
6299 
6300 	kfree(cmd.payloads);
6301 	return result;
6302 }
6303 
6304 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6305 {
6306 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6307 }
6308 
6309 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6310 	.master_xfer = amdgpu_dm_i2c_xfer,
6311 	.functionality = amdgpu_dm_i2c_func,
6312 };
6313 
6314 static struct amdgpu_i2c_adapter *
6315 create_i2c(struct ddc_service *ddc_service,
6316 	   int link_index,
6317 	   int *res)
6318 {
6319 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6320 	struct amdgpu_i2c_adapter *i2c;
6321 
6322 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6323 	if (!i2c)
6324 		return NULL;
6325 	i2c->base.owner = THIS_MODULE;
6326 	i2c->base.class = I2C_CLASS_DDC;
6327 	i2c->base.dev.parent = &adev->pdev->dev;
6328 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6329 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6330 	i2c_set_adapdata(&i2c->base, i2c);
6331 	i2c->ddc_service = ddc_service;
6332 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6333 
6334 	return i2c;
6335 }
6336 
6337 
6338 /*
6339  * Note: this function assumes that dc_link_detect() was called for the
6340  * dc_link which will be represented by this aconnector.
6341  */
6342 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6343 				    struct amdgpu_dm_connector *aconnector,
6344 				    uint32_t link_index,
6345 				    struct amdgpu_encoder *aencoder)
6346 {
6347 	int res = 0;
6348 	int connector_type;
6349 	struct dc *dc = dm->dc;
6350 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6351 	struct amdgpu_i2c_adapter *i2c;
6352 
6353 	link->priv = aconnector;
6354 
6355 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6356 
6357 	i2c = create_i2c(link->ddc, link->link_index, &res);
6358 	if (!i2c) {
6359 		DRM_ERROR("Failed to create i2c adapter data\n");
6360 		return -ENOMEM;
6361 	}
6362 
6363 	aconnector->i2c = i2c;
6364 	res = i2c_add_adapter(&i2c->base);
6365 
6366 	if (res) {
6367 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6368 		goto out_free;
6369 	}
6370 
6371 	connector_type = to_drm_connector_type(link->connector_signal);
6372 
6373 	res = drm_connector_init_with_ddc(
6374 			dm->ddev,
6375 			&aconnector->base,
6376 			&amdgpu_dm_connector_funcs,
6377 			connector_type,
6378 			&i2c->base);
6379 
6380 	if (res) {
6381 		DRM_ERROR("connector_init failed\n");
6382 		aconnector->connector_id = -1;
6383 		goto out_free;
6384 	}
6385 
6386 	drm_connector_helper_add(
6387 			&aconnector->base,
6388 			&amdgpu_dm_connector_helper_funcs);
6389 
6390 	amdgpu_dm_connector_init_helper(
6391 		dm,
6392 		aconnector,
6393 		connector_type,
6394 		link,
6395 		link_index);
6396 
6397 	drm_connector_attach_encoder(
6398 		&aconnector->base, &aencoder->base);
6399 
6400 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6401 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6402 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6403 
6404 out_free:
6405 	if (res) {
6406 		kfree(i2c);
6407 		aconnector->i2c = NULL;
6408 	}
6409 	return res;
6410 }
6411 
6412 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6413 {
6414 	switch (adev->mode_info.num_crtc) {
6415 	case 1:
6416 		return 0x1;
6417 	case 2:
6418 		return 0x3;
6419 	case 3:
6420 		return 0x7;
6421 	case 4:
6422 		return 0xf;
6423 	case 5:
6424 		return 0x1f;
6425 	case 6:
6426 	default:
6427 		return 0x3f;
6428 	}
6429 }
6430 
6431 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6432 				  struct amdgpu_encoder *aencoder,
6433 				  uint32_t link_index)
6434 {
6435 	struct amdgpu_device *adev = dev->dev_private;
6436 
6437 	int res = drm_encoder_init(dev,
6438 				   &aencoder->base,
6439 				   &amdgpu_dm_encoder_funcs,
6440 				   DRM_MODE_ENCODER_TMDS,
6441 				   NULL);
6442 
6443 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6444 
6445 	if (!res)
6446 		aencoder->encoder_id = link_index;
6447 	else
6448 		aencoder->encoder_id = -1;
6449 
6450 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6451 
6452 	return res;
6453 }
6454 
6455 static void manage_dm_interrupts(struct amdgpu_device *adev,
6456 				 struct amdgpu_crtc *acrtc,
6457 				 bool enable)
6458 {
6459 	/*
6460 	 * We have no guarantee that the frontend index maps to the same
6461 	 * backend index - some even map to more than one.
6462 	 *
6463 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6464 	 */
6465 	int irq_type =
6466 		amdgpu_display_crtc_idx_to_irq_type(
6467 			adev,
6468 			acrtc->crtc_id);
6469 
6470 	if (enable) {
6471 		drm_crtc_vblank_on(&acrtc->base);
6472 		amdgpu_irq_get(
6473 			adev,
6474 			&adev->pageflip_irq,
6475 			irq_type);
6476 	} else {
6477 
6478 		amdgpu_irq_put(
6479 			adev,
6480 			&adev->pageflip_irq,
6481 			irq_type);
6482 		drm_crtc_vblank_off(&acrtc->base);
6483 	}
6484 }
6485 
6486 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6487 				      struct amdgpu_crtc *acrtc)
6488 {
6489 	int irq_type =
6490 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6491 
6492 	/**
6493 	 * This reads the current state for the IRQ and force reapplies
6494 	 * the setting to hardware.
6495 	 */
6496 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6497 }
6498 
6499 static bool
6500 is_scaling_state_different(const struct dm_connector_state *dm_state,
6501 			   const struct dm_connector_state *old_dm_state)
6502 {
6503 	if (dm_state->scaling != old_dm_state->scaling)
6504 		return true;
6505 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6506 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6507 			return true;
6508 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6509 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6510 			return true;
6511 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6512 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6513 		return true;
6514 	return false;
6515 }
6516 
6517 #ifdef CONFIG_DRM_AMD_DC_HDCP
6518 static bool is_content_protection_different(struct drm_connector_state *state,
6519 					    const struct drm_connector_state *old_state,
6520 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6521 {
6522 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6523 
6524 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6525 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6526 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6527 		return true;
6528 	}
6529 
6530 	/* CP is being re enabled, ignore this */
6531 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6532 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6533 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6534 		return false;
6535 	}
6536 
6537 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6538 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6539 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6540 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6541 
6542 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6543 	 * hot-plug, headless s3, dpms
6544 	 */
6545 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6546 	    aconnector->dc_sink != NULL)
6547 		return true;
6548 
6549 	if (old_state->content_protection == state->content_protection)
6550 		return false;
6551 
6552 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6553 		return true;
6554 
6555 	return false;
6556 }
6557 
6558 #endif
6559 static void remove_stream(struct amdgpu_device *adev,
6560 			  struct amdgpu_crtc *acrtc,
6561 			  struct dc_stream_state *stream)
6562 {
6563 	/* this is the update mode case */
6564 
6565 	acrtc->otg_inst = -1;
6566 	acrtc->enabled = false;
6567 }
6568 
6569 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6570 			       struct dc_cursor_position *position)
6571 {
6572 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6573 	int x, y;
6574 	int xorigin = 0, yorigin = 0;
6575 
6576 	position->enable = false;
6577 	position->x = 0;
6578 	position->y = 0;
6579 
6580 	if (!crtc || !plane->state->fb)
6581 		return 0;
6582 
6583 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6584 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6585 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6586 			  __func__,
6587 			  plane->state->crtc_w,
6588 			  plane->state->crtc_h);
6589 		return -EINVAL;
6590 	}
6591 
6592 	x = plane->state->crtc_x;
6593 	y = plane->state->crtc_y;
6594 
6595 	if (x <= -amdgpu_crtc->max_cursor_width ||
6596 	    y <= -amdgpu_crtc->max_cursor_height)
6597 		return 0;
6598 
6599 	if (x < 0) {
6600 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6601 		x = 0;
6602 	}
6603 	if (y < 0) {
6604 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6605 		y = 0;
6606 	}
6607 	position->enable = true;
6608 	position->translate_by_source = true;
6609 	position->x = x;
6610 	position->y = y;
6611 	position->x_hotspot = xorigin;
6612 	position->y_hotspot = yorigin;
6613 
6614 	return 0;
6615 }
6616 
6617 static void handle_cursor_update(struct drm_plane *plane,
6618 				 struct drm_plane_state *old_plane_state)
6619 {
6620 	struct amdgpu_device *adev = plane->dev->dev_private;
6621 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6622 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6623 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6624 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6625 	uint64_t address = afb ? afb->address : 0;
6626 	struct dc_cursor_position position;
6627 	struct dc_cursor_attributes attributes;
6628 	int ret;
6629 
6630 	if (!plane->state->fb && !old_plane_state->fb)
6631 		return;
6632 
6633 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6634 			 __func__,
6635 			 amdgpu_crtc->crtc_id,
6636 			 plane->state->crtc_w,
6637 			 plane->state->crtc_h);
6638 
6639 	ret = get_cursor_position(plane, crtc, &position);
6640 	if (ret)
6641 		return;
6642 
6643 	if (!position.enable) {
6644 		/* turn off cursor */
6645 		if (crtc_state && crtc_state->stream) {
6646 			mutex_lock(&adev->dm.dc_lock);
6647 			dc_stream_set_cursor_position(crtc_state->stream,
6648 						      &position);
6649 			mutex_unlock(&adev->dm.dc_lock);
6650 		}
6651 		return;
6652 	}
6653 
6654 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6655 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6656 
6657 	memset(&attributes, 0, sizeof(attributes));
6658 	attributes.address.high_part = upper_32_bits(address);
6659 	attributes.address.low_part  = lower_32_bits(address);
6660 	attributes.width             = plane->state->crtc_w;
6661 	attributes.height            = plane->state->crtc_h;
6662 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6663 	attributes.rotation_angle    = 0;
6664 	attributes.attribute_flags.value = 0;
6665 
6666 	attributes.pitch = attributes.width;
6667 
6668 	if (crtc_state->stream) {
6669 		mutex_lock(&adev->dm.dc_lock);
6670 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6671 							 &attributes))
6672 			DRM_ERROR("DC failed to set cursor attributes\n");
6673 
6674 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6675 						   &position))
6676 			DRM_ERROR("DC failed to set cursor position\n");
6677 		mutex_unlock(&adev->dm.dc_lock);
6678 	}
6679 }
6680 
6681 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6682 {
6683 
6684 	assert_spin_locked(&acrtc->base.dev->event_lock);
6685 	WARN_ON(acrtc->event);
6686 
6687 	acrtc->event = acrtc->base.state->event;
6688 
6689 	/* Set the flip status */
6690 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6691 
6692 	/* Mark this event as consumed */
6693 	acrtc->base.state->event = NULL;
6694 
6695 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6696 						 acrtc->crtc_id);
6697 }
6698 
6699 static void update_freesync_state_on_stream(
6700 	struct amdgpu_display_manager *dm,
6701 	struct dm_crtc_state *new_crtc_state,
6702 	struct dc_stream_state *new_stream,
6703 	struct dc_plane_state *surface,
6704 	u32 flip_timestamp_in_us)
6705 {
6706 	struct mod_vrr_params vrr_params;
6707 	struct dc_info_packet vrr_infopacket = {0};
6708 	struct amdgpu_device *adev = dm->adev;
6709 	unsigned long flags;
6710 
6711 	if (!new_stream)
6712 		return;
6713 
6714 	/*
6715 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6716 	 * For now it's sufficient to just guard against these conditions.
6717 	 */
6718 
6719 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6720 		return;
6721 
6722 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6723 	vrr_params = new_crtc_state->vrr_params;
6724 
6725 	if (surface) {
6726 		mod_freesync_handle_preflip(
6727 			dm->freesync_module,
6728 			surface,
6729 			new_stream,
6730 			flip_timestamp_in_us,
6731 			&vrr_params);
6732 
6733 		if (adev->family < AMDGPU_FAMILY_AI &&
6734 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6735 			mod_freesync_handle_v_update(dm->freesync_module,
6736 						     new_stream, &vrr_params);
6737 
6738 			/* Need to call this before the frame ends. */
6739 			dc_stream_adjust_vmin_vmax(dm->dc,
6740 						   new_crtc_state->stream,
6741 						   &vrr_params.adjust);
6742 		}
6743 	}
6744 
6745 	mod_freesync_build_vrr_infopacket(
6746 		dm->freesync_module,
6747 		new_stream,
6748 		&vrr_params,
6749 		PACKET_TYPE_VRR,
6750 		TRANSFER_FUNC_UNKNOWN,
6751 		&vrr_infopacket);
6752 
6753 	new_crtc_state->freesync_timing_changed |=
6754 		(memcmp(&new_crtc_state->vrr_params.adjust,
6755 			&vrr_params.adjust,
6756 			sizeof(vrr_params.adjust)) != 0);
6757 
6758 	new_crtc_state->freesync_vrr_info_changed |=
6759 		(memcmp(&new_crtc_state->vrr_infopacket,
6760 			&vrr_infopacket,
6761 			sizeof(vrr_infopacket)) != 0);
6762 
6763 	new_crtc_state->vrr_params = vrr_params;
6764 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6765 
6766 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6767 	new_stream->vrr_infopacket = vrr_infopacket;
6768 
6769 	if (new_crtc_state->freesync_vrr_info_changed)
6770 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6771 			      new_crtc_state->base.crtc->base.id,
6772 			      (int)new_crtc_state->base.vrr_enabled,
6773 			      (int)vrr_params.state);
6774 
6775 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6776 }
6777 
6778 static void pre_update_freesync_state_on_stream(
6779 	struct amdgpu_display_manager *dm,
6780 	struct dm_crtc_state *new_crtc_state)
6781 {
6782 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6783 	struct mod_vrr_params vrr_params;
6784 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6785 	struct amdgpu_device *adev = dm->adev;
6786 	unsigned long flags;
6787 
6788 	if (!new_stream)
6789 		return;
6790 
6791 	/*
6792 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6793 	 * For now it's sufficient to just guard against these conditions.
6794 	 */
6795 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6796 		return;
6797 
6798 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6799 	vrr_params = new_crtc_state->vrr_params;
6800 
6801 	if (new_crtc_state->vrr_supported &&
6802 	    config.min_refresh_in_uhz &&
6803 	    config.max_refresh_in_uhz) {
6804 		config.state = new_crtc_state->base.vrr_enabled ?
6805 			VRR_STATE_ACTIVE_VARIABLE :
6806 			VRR_STATE_INACTIVE;
6807 	} else {
6808 		config.state = VRR_STATE_UNSUPPORTED;
6809 	}
6810 
6811 	mod_freesync_build_vrr_params(dm->freesync_module,
6812 				      new_stream,
6813 				      &config, &vrr_params);
6814 
6815 	new_crtc_state->freesync_timing_changed |=
6816 		(memcmp(&new_crtc_state->vrr_params.adjust,
6817 			&vrr_params.adjust,
6818 			sizeof(vrr_params.adjust)) != 0);
6819 
6820 	new_crtc_state->vrr_params = vrr_params;
6821 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6822 }
6823 
6824 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6825 					    struct dm_crtc_state *new_state)
6826 {
6827 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6828 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6829 
6830 	if (!old_vrr_active && new_vrr_active) {
6831 		/* Transition VRR inactive -> active:
6832 		 * While VRR is active, we must not disable vblank irq, as a
6833 		 * reenable after disable would compute bogus vblank/pflip
6834 		 * timestamps if it likely happened inside display front-porch.
6835 		 *
6836 		 * We also need vupdate irq for the actual core vblank handling
6837 		 * at end of vblank.
6838 		 */
6839 		dm_set_vupdate_irq(new_state->base.crtc, true);
6840 		drm_crtc_vblank_get(new_state->base.crtc);
6841 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6842 				 __func__, new_state->base.crtc->base.id);
6843 	} else if (old_vrr_active && !new_vrr_active) {
6844 		/* Transition VRR active -> inactive:
6845 		 * Allow vblank irq disable again for fixed refresh rate.
6846 		 */
6847 		dm_set_vupdate_irq(new_state->base.crtc, false);
6848 		drm_crtc_vblank_put(new_state->base.crtc);
6849 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6850 				 __func__, new_state->base.crtc->base.id);
6851 	}
6852 }
6853 
6854 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6855 {
6856 	struct drm_plane *plane;
6857 	struct drm_plane_state *old_plane_state, *new_plane_state;
6858 	int i;
6859 
6860 	/*
6861 	 * TODO: Make this per-stream so we don't issue redundant updates for
6862 	 * commits with multiple streams.
6863 	 */
6864 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6865 				       new_plane_state, i)
6866 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6867 			handle_cursor_update(plane, old_plane_state);
6868 }
6869 
6870 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6871 				    struct dc_state *dc_state,
6872 				    struct drm_device *dev,
6873 				    struct amdgpu_display_manager *dm,
6874 				    struct drm_crtc *pcrtc,
6875 				    bool wait_for_vblank)
6876 {
6877 	uint32_t i;
6878 	uint64_t timestamp_ns;
6879 	struct drm_plane *plane;
6880 	struct drm_plane_state *old_plane_state, *new_plane_state;
6881 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6882 	struct drm_crtc_state *new_pcrtc_state =
6883 			drm_atomic_get_new_crtc_state(state, pcrtc);
6884 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6885 	struct dm_crtc_state *dm_old_crtc_state =
6886 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6887 	int planes_count = 0, vpos, hpos;
6888 	long r;
6889 	unsigned long flags;
6890 	struct amdgpu_bo *abo;
6891 	uint64_t tiling_flags;
6892 	bool tmz_surface = false;
6893 	uint32_t target_vblank, last_flip_vblank;
6894 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6895 	bool pflip_present = false;
6896 	struct {
6897 		struct dc_surface_update surface_updates[MAX_SURFACES];
6898 		struct dc_plane_info plane_infos[MAX_SURFACES];
6899 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6900 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6901 		struct dc_stream_update stream_update;
6902 	} *bundle;
6903 
6904 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6905 
6906 	if (!bundle) {
6907 		dm_error("Failed to allocate update bundle\n");
6908 		goto cleanup;
6909 	}
6910 
6911 	/*
6912 	 * Disable the cursor first if we're disabling all the planes.
6913 	 * It'll remain on the screen after the planes are re-enabled
6914 	 * if we don't.
6915 	 */
6916 	if (acrtc_state->active_planes == 0)
6917 		amdgpu_dm_commit_cursors(state);
6918 
6919 	/* update planes when needed */
6920 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6921 		struct drm_crtc *crtc = new_plane_state->crtc;
6922 		struct drm_crtc_state *new_crtc_state;
6923 		struct drm_framebuffer *fb = new_plane_state->fb;
6924 		bool plane_needs_flip;
6925 		struct dc_plane_state *dc_plane;
6926 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6927 
6928 		/* Cursor plane is handled after stream updates */
6929 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6930 			continue;
6931 
6932 		if (!fb || !crtc || pcrtc != crtc)
6933 			continue;
6934 
6935 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6936 		if (!new_crtc_state->active)
6937 			continue;
6938 
6939 		dc_plane = dm_new_plane_state->dc_state;
6940 
6941 		bundle->surface_updates[planes_count].surface = dc_plane;
6942 		if (new_pcrtc_state->color_mgmt_changed) {
6943 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6944 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6945 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6946 		}
6947 
6948 		fill_dc_scaling_info(new_plane_state,
6949 				     &bundle->scaling_infos[planes_count]);
6950 
6951 		bundle->surface_updates[planes_count].scaling_info =
6952 			&bundle->scaling_infos[planes_count];
6953 
6954 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6955 
6956 		pflip_present = pflip_present || plane_needs_flip;
6957 
6958 		if (!plane_needs_flip) {
6959 			planes_count += 1;
6960 			continue;
6961 		}
6962 
6963 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6964 
6965 		/*
6966 		 * Wait for all fences on this FB. Do limited wait to avoid
6967 		 * deadlock during GPU reset when this fence will not signal
6968 		 * but we hold reservation lock for the BO.
6969 		 */
6970 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6971 							false,
6972 							msecs_to_jiffies(5000));
6973 		if (unlikely(r <= 0))
6974 			DRM_ERROR("Waiting for fences timed out!");
6975 
6976 		/*
6977 		 * TODO This might fail and hence better not used, wait
6978 		 * explicitly on fences instead
6979 		 * and in general should be called for
6980 		 * blocking commit to as per framework helpers
6981 		 */
6982 		r = amdgpu_bo_reserve(abo, true);
6983 		if (unlikely(r != 0))
6984 			DRM_ERROR("failed to reserve buffer before flip\n");
6985 
6986 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6987 
6988 		tmz_surface = amdgpu_bo_encrypted(abo);
6989 
6990 		amdgpu_bo_unreserve(abo);
6991 
6992 		fill_dc_plane_info_and_addr(
6993 			dm->adev, new_plane_state, tiling_flags,
6994 			&bundle->plane_infos[planes_count],
6995 			&bundle->flip_addrs[planes_count].address,
6996 			tmz_surface,
6997 			false);
6998 
6999 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7000 				 new_plane_state->plane->index,
7001 				 bundle->plane_infos[planes_count].dcc.enable);
7002 
7003 		bundle->surface_updates[planes_count].plane_info =
7004 			&bundle->plane_infos[planes_count];
7005 
7006 		/*
7007 		 * Only allow immediate flips for fast updates that don't
7008 		 * change FB pitch, DCC state, rotation or mirroing.
7009 		 */
7010 		bundle->flip_addrs[planes_count].flip_immediate =
7011 			crtc->state->async_flip &&
7012 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7013 
7014 		timestamp_ns = ktime_get_ns();
7015 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7016 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7017 		bundle->surface_updates[planes_count].surface = dc_plane;
7018 
7019 		if (!bundle->surface_updates[planes_count].surface) {
7020 			DRM_ERROR("No surface for CRTC: id=%d\n",
7021 					acrtc_attach->crtc_id);
7022 			continue;
7023 		}
7024 
7025 		if (plane == pcrtc->primary)
7026 			update_freesync_state_on_stream(
7027 				dm,
7028 				acrtc_state,
7029 				acrtc_state->stream,
7030 				dc_plane,
7031 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7032 
7033 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7034 				 __func__,
7035 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7036 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7037 
7038 		planes_count += 1;
7039 
7040 	}
7041 
7042 	if (pflip_present) {
7043 		if (!vrr_active) {
7044 			/* Use old throttling in non-vrr fixed refresh rate mode
7045 			 * to keep flip scheduling based on target vblank counts
7046 			 * working in a backwards compatible way, e.g., for
7047 			 * clients using the GLX_OML_sync_control extension or
7048 			 * DRI3/Present extension with defined target_msc.
7049 			 */
7050 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7051 		}
7052 		else {
7053 			/* For variable refresh rate mode only:
7054 			 * Get vblank of last completed flip to avoid > 1 vrr
7055 			 * flips per video frame by use of throttling, but allow
7056 			 * flip programming anywhere in the possibly large
7057 			 * variable vrr vblank interval for fine-grained flip
7058 			 * timing control and more opportunity to avoid stutter
7059 			 * on late submission of flips.
7060 			 */
7061 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7062 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7063 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7064 		}
7065 
7066 		target_vblank = last_flip_vblank + wait_for_vblank;
7067 
7068 		/*
7069 		 * Wait until we're out of the vertical blank period before the one
7070 		 * targeted by the flip
7071 		 */
7072 		while ((acrtc_attach->enabled &&
7073 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7074 							    0, &vpos, &hpos, NULL,
7075 							    NULL, &pcrtc->hwmode)
7076 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7077 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7078 			(int)(target_vblank -
7079 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7080 			usleep_range(1000, 1100);
7081 		}
7082 
7083 		/**
7084 		 * Prepare the flip event for the pageflip interrupt to handle.
7085 		 *
7086 		 * This only works in the case where we've already turned on the
7087 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7088 		 * from 0 -> n planes we have to skip a hardware generated event
7089 		 * and rely on sending it from software.
7090 		 */
7091 		if (acrtc_attach->base.state->event &&
7092 		    acrtc_state->active_planes > 0) {
7093 			drm_crtc_vblank_get(pcrtc);
7094 
7095 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7096 
7097 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7098 			prepare_flip_isr(acrtc_attach);
7099 
7100 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7101 		}
7102 
7103 		if (acrtc_state->stream) {
7104 			if (acrtc_state->freesync_vrr_info_changed)
7105 				bundle->stream_update.vrr_infopacket =
7106 					&acrtc_state->stream->vrr_infopacket;
7107 		}
7108 	}
7109 
7110 	/* Update the planes if changed or disable if we don't have any. */
7111 	if ((planes_count || acrtc_state->active_planes == 0) &&
7112 		acrtc_state->stream) {
7113 		bundle->stream_update.stream = acrtc_state->stream;
7114 		if (new_pcrtc_state->mode_changed) {
7115 			bundle->stream_update.src = acrtc_state->stream->src;
7116 			bundle->stream_update.dst = acrtc_state->stream->dst;
7117 		}
7118 
7119 		if (new_pcrtc_state->color_mgmt_changed) {
7120 			/*
7121 			 * TODO: This isn't fully correct since we've actually
7122 			 * already modified the stream in place.
7123 			 */
7124 			bundle->stream_update.gamut_remap =
7125 				&acrtc_state->stream->gamut_remap_matrix;
7126 			bundle->stream_update.output_csc_transform =
7127 				&acrtc_state->stream->csc_color_matrix;
7128 			bundle->stream_update.out_transfer_func =
7129 				acrtc_state->stream->out_transfer_func;
7130 		}
7131 
7132 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7133 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7134 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7135 
7136 		/*
7137 		 * If FreeSync state on the stream has changed then we need to
7138 		 * re-adjust the min/max bounds now that DC doesn't handle this
7139 		 * as part of commit.
7140 		 */
7141 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7142 		    amdgpu_dm_vrr_active(acrtc_state)) {
7143 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7144 			dc_stream_adjust_vmin_vmax(
7145 				dm->dc, acrtc_state->stream,
7146 				&acrtc_state->vrr_params.adjust);
7147 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7148 		}
7149 		mutex_lock(&dm->dc_lock);
7150 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7151 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7152 			amdgpu_dm_psr_disable(acrtc_state->stream);
7153 
7154 		dc_commit_updates_for_stream(dm->dc,
7155 						     bundle->surface_updates,
7156 						     planes_count,
7157 						     acrtc_state->stream,
7158 						     &bundle->stream_update,
7159 						     dc_state);
7160 
7161 		/**
7162 		 * Enable or disable the interrupts on the backend.
7163 		 *
7164 		 * Most pipes are put into power gating when unused.
7165 		 *
7166 		 * When power gating is enabled on a pipe we lose the
7167 		 * interrupt enablement state when power gating is disabled.
7168 		 *
7169 		 * So we need to update the IRQ control state in hardware
7170 		 * whenever the pipe turns on (since it could be previously
7171 		 * power gated) or off (since some pipes can't be power gated
7172 		 * on some ASICs).
7173 		 */
7174 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7175 			dm_update_pflip_irq_state(
7176 				(struct amdgpu_device *)dev->dev_private,
7177 				acrtc_attach);
7178 
7179 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7180 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7181 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7182 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7183 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7184 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7185 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7186 			amdgpu_dm_psr_enable(acrtc_state->stream);
7187 		}
7188 
7189 		mutex_unlock(&dm->dc_lock);
7190 	}
7191 
7192 	/*
7193 	 * Update cursor state *after* programming all the planes.
7194 	 * This avoids redundant programming in the case where we're going
7195 	 * to be disabling a single plane - those pipes are being disabled.
7196 	 */
7197 	if (acrtc_state->active_planes)
7198 		amdgpu_dm_commit_cursors(state);
7199 
7200 cleanup:
7201 	kfree(bundle);
7202 }
7203 
7204 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7205 				   struct drm_atomic_state *state)
7206 {
7207 	struct amdgpu_device *adev = dev->dev_private;
7208 	struct amdgpu_dm_connector *aconnector;
7209 	struct drm_connector *connector;
7210 	struct drm_connector_state *old_con_state, *new_con_state;
7211 	struct drm_crtc_state *new_crtc_state;
7212 	struct dm_crtc_state *new_dm_crtc_state;
7213 	const struct dc_stream_status *status;
7214 	int i, inst;
7215 
7216 	/* Notify device removals. */
7217 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7218 		if (old_con_state->crtc != new_con_state->crtc) {
7219 			/* CRTC changes require notification. */
7220 			goto notify;
7221 		}
7222 
7223 		if (!new_con_state->crtc)
7224 			continue;
7225 
7226 		new_crtc_state = drm_atomic_get_new_crtc_state(
7227 			state, new_con_state->crtc);
7228 
7229 		if (!new_crtc_state)
7230 			continue;
7231 
7232 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7233 			continue;
7234 
7235 	notify:
7236 		aconnector = to_amdgpu_dm_connector(connector);
7237 
7238 		mutex_lock(&adev->dm.audio_lock);
7239 		inst = aconnector->audio_inst;
7240 		aconnector->audio_inst = -1;
7241 		mutex_unlock(&adev->dm.audio_lock);
7242 
7243 		amdgpu_dm_audio_eld_notify(adev, inst);
7244 	}
7245 
7246 	/* Notify audio device additions. */
7247 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7248 		if (!new_con_state->crtc)
7249 			continue;
7250 
7251 		new_crtc_state = drm_atomic_get_new_crtc_state(
7252 			state, new_con_state->crtc);
7253 
7254 		if (!new_crtc_state)
7255 			continue;
7256 
7257 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7258 			continue;
7259 
7260 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7261 		if (!new_dm_crtc_state->stream)
7262 			continue;
7263 
7264 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7265 		if (!status)
7266 			continue;
7267 
7268 		aconnector = to_amdgpu_dm_connector(connector);
7269 
7270 		mutex_lock(&adev->dm.audio_lock);
7271 		inst = status->audio_inst;
7272 		aconnector->audio_inst = inst;
7273 		mutex_unlock(&adev->dm.audio_lock);
7274 
7275 		amdgpu_dm_audio_eld_notify(adev, inst);
7276 	}
7277 }
7278 
7279 /*
7280  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7281  * @crtc_state: the DRM CRTC state
7282  * @stream_state: the DC stream state.
7283  *
7284  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7285  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7286  */
7287 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7288 						struct dc_stream_state *stream_state)
7289 {
7290 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7291 }
7292 
7293 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7294 				   struct drm_atomic_state *state,
7295 				   bool nonblock)
7296 {
7297 	struct drm_crtc *crtc;
7298 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7299 	struct amdgpu_device *adev = dev->dev_private;
7300 	int i;
7301 
7302 	/*
7303 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7304 	 * a modeset, being disabled, or have no active planes.
7305 	 *
7306 	 * It's done in atomic commit rather than commit tail for now since
7307 	 * some of these interrupt handlers access the current CRTC state and
7308 	 * potentially the stream pointer itself.
7309 	 *
7310 	 * Since the atomic state is swapped within atomic commit and not within
7311 	 * commit tail this would leave to new state (that hasn't been committed yet)
7312 	 * being accesssed from within the handlers.
7313 	 *
7314 	 * TODO: Fix this so we can do this in commit tail and not have to block
7315 	 * in atomic check.
7316 	 */
7317 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7318 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7319 
7320 		if (old_crtc_state->active &&
7321 		    (!new_crtc_state->active ||
7322 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7323 			manage_dm_interrupts(adev, acrtc, false);
7324 	}
7325 	/*
7326 	 * Add check here for SoC's that support hardware cursor plane, to
7327 	 * unset legacy_cursor_update
7328 	 */
7329 
7330 	return drm_atomic_helper_commit(dev, state, nonblock);
7331 
7332 	/*TODO Handle EINTR, reenable IRQ*/
7333 }
7334 
7335 /**
7336  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7337  * @state: The atomic state to commit
7338  *
7339  * This will tell DC to commit the constructed DC state from atomic_check,
7340  * programming the hardware. Any failures here implies a hardware failure, since
7341  * atomic check should have filtered anything non-kosher.
7342  */
7343 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7344 {
7345 	struct drm_device *dev = state->dev;
7346 	struct amdgpu_device *adev = dev->dev_private;
7347 	struct amdgpu_display_manager *dm = &adev->dm;
7348 	struct dm_atomic_state *dm_state;
7349 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7350 	uint32_t i, j;
7351 	struct drm_crtc *crtc;
7352 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7353 	unsigned long flags;
7354 	bool wait_for_vblank = true;
7355 	struct drm_connector *connector;
7356 	struct drm_connector_state *old_con_state, *new_con_state;
7357 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7358 	int crtc_disable_count = 0;
7359 
7360 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7361 
7362 	dm_state = dm_atomic_get_new_state(state);
7363 	if (dm_state && dm_state->context) {
7364 		dc_state = dm_state->context;
7365 	} else {
7366 		/* No state changes, retain current state. */
7367 		dc_state_temp = dc_create_state(dm->dc);
7368 		ASSERT(dc_state_temp);
7369 		dc_state = dc_state_temp;
7370 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7371 	}
7372 
7373 	/* update changed items */
7374 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7375 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7376 
7377 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7378 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7379 
7380 		DRM_DEBUG_DRIVER(
7381 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7382 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7383 			"connectors_changed:%d\n",
7384 			acrtc->crtc_id,
7385 			new_crtc_state->enable,
7386 			new_crtc_state->active,
7387 			new_crtc_state->planes_changed,
7388 			new_crtc_state->mode_changed,
7389 			new_crtc_state->active_changed,
7390 			new_crtc_state->connectors_changed);
7391 
7392 		/* Copy all transient state flags into dc state */
7393 		if (dm_new_crtc_state->stream) {
7394 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7395 							    dm_new_crtc_state->stream);
7396 		}
7397 
7398 		/* handles headless hotplug case, updating new_state and
7399 		 * aconnector as needed
7400 		 */
7401 
7402 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7403 
7404 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7405 
7406 			if (!dm_new_crtc_state->stream) {
7407 				/*
7408 				 * this could happen because of issues with
7409 				 * userspace notifications delivery.
7410 				 * In this case userspace tries to set mode on
7411 				 * display which is disconnected in fact.
7412 				 * dc_sink is NULL in this case on aconnector.
7413 				 * We expect reset mode will come soon.
7414 				 *
7415 				 * This can also happen when unplug is done
7416 				 * during resume sequence ended
7417 				 *
7418 				 * In this case, we want to pretend we still
7419 				 * have a sink to keep the pipe running so that
7420 				 * hw state is consistent with the sw state
7421 				 */
7422 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7423 						__func__, acrtc->base.base.id);
7424 				continue;
7425 			}
7426 
7427 			if (dm_old_crtc_state->stream)
7428 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7429 
7430 			pm_runtime_get_noresume(dev->dev);
7431 
7432 			acrtc->enabled = true;
7433 			acrtc->hw_mode = new_crtc_state->mode;
7434 			crtc->hwmode = new_crtc_state->mode;
7435 		} else if (modereset_required(new_crtc_state)) {
7436 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7437 			/* i.e. reset mode */
7438 			if (dm_old_crtc_state->stream) {
7439 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7440 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7441 
7442 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7443 			}
7444 		}
7445 	} /* for_each_crtc_in_state() */
7446 
7447 	if (dc_state) {
7448 		dm_enable_per_frame_crtc_master_sync(dc_state);
7449 		mutex_lock(&dm->dc_lock);
7450 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7451 		mutex_unlock(&dm->dc_lock);
7452 	}
7453 
7454 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7455 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7456 
7457 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7458 
7459 		if (dm_new_crtc_state->stream != NULL) {
7460 			const struct dc_stream_status *status =
7461 					dc_stream_get_status(dm_new_crtc_state->stream);
7462 
7463 			if (!status)
7464 				status = dc_stream_get_status_from_state(dc_state,
7465 									 dm_new_crtc_state->stream);
7466 
7467 			if (!status)
7468 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7469 			else
7470 				acrtc->otg_inst = status->primary_otg_inst;
7471 		}
7472 	}
7473 #ifdef CONFIG_DRM_AMD_DC_HDCP
7474 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7475 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7476 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7477 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7478 
7479 		new_crtc_state = NULL;
7480 
7481 		if (acrtc)
7482 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7483 
7484 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7485 
7486 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7487 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7488 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7489 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7490 			continue;
7491 		}
7492 
7493 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7494 			hdcp_update_display(
7495 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7496 				new_con_state->hdcp_content_type,
7497 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7498 													 : false);
7499 	}
7500 #endif
7501 
7502 	/* Handle connector state changes */
7503 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7504 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7505 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7506 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7507 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7508 		struct dc_stream_update stream_update;
7509 		struct dc_info_packet hdr_packet;
7510 		struct dc_stream_status *status = NULL;
7511 		bool abm_changed, hdr_changed, scaling_changed;
7512 
7513 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7514 		memset(&stream_update, 0, sizeof(stream_update));
7515 
7516 		if (acrtc) {
7517 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7518 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7519 		}
7520 
7521 		/* Skip any modesets/resets */
7522 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7523 			continue;
7524 
7525 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7526 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7527 
7528 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7529 							     dm_old_con_state);
7530 
7531 		abm_changed = dm_new_crtc_state->abm_level !=
7532 			      dm_old_crtc_state->abm_level;
7533 
7534 		hdr_changed =
7535 			is_hdr_metadata_different(old_con_state, new_con_state);
7536 
7537 		if (!scaling_changed && !abm_changed && !hdr_changed)
7538 			continue;
7539 
7540 		stream_update.stream = dm_new_crtc_state->stream;
7541 		if (scaling_changed) {
7542 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7543 					dm_new_con_state, dm_new_crtc_state->stream);
7544 
7545 			stream_update.src = dm_new_crtc_state->stream->src;
7546 			stream_update.dst = dm_new_crtc_state->stream->dst;
7547 		}
7548 
7549 		if (abm_changed) {
7550 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7551 
7552 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7553 		}
7554 
7555 		if (hdr_changed) {
7556 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7557 			stream_update.hdr_static_metadata = &hdr_packet;
7558 		}
7559 
7560 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7561 		WARN_ON(!status);
7562 		WARN_ON(!status->plane_count);
7563 
7564 		/*
7565 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7566 		 * Here we create an empty update on each plane.
7567 		 * To fix this, DC should permit updating only stream properties.
7568 		 */
7569 		for (j = 0; j < status->plane_count; j++)
7570 			dummy_updates[j].surface = status->plane_states[0];
7571 
7572 
7573 		mutex_lock(&dm->dc_lock);
7574 		dc_commit_updates_for_stream(dm->dc,
7575 						     dummy_updates,
7576 						     status->plane_count,
7577 						     dm_new_crtc_state->stream,
7578 						     &stream_update,
7579 						     dc_state);
7580 		mutex_unlock(&dm->dc_lock);
7581 	}
7582 
7583 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7584 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7585 				      new_crtc_state, i) {
7586 		if (old_crtc_state->active && !new_crtc_state->active)
7587 			crtc_disable_count++;
7588 
7589 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7590 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7591 
7592 		/* Update freesync active state. */
7593 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7594 
7595 		/* Handle vrr on->off / off->on transitions */
7596 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7597 						dm_new_crtc_state);
7598 	}
7599 
7600 	/**
7601 	 * Enable interrupts for CRTCs that are newly enabled or went through
7602 	 * a modeset. It was intentionally deferred until after the front end
7603 	 * state was modified to wait until the OTG was on and so the IRQ
7604 	 * handlers didn't access stale or invalid state.
7605 	 */
7606 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7607 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7608 
7609 		if (new_crtc_state->active &&
7610 		    (!old_crtc_state->active ||
7611 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7612 			manage_dm_interrupts(adev, acrtc, true);
7613 #ifdef CONFIG_DEBUG_FS
7614 			/**
7615 			 * Frontend may have changed so reapply the CRC capture
7616 			 * settings for the stream.
7617 			 */
7618 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7619 
7620 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7621 				amdgpu_dm_crtc_configure_crc_source(
7622 					crtc, dm_new_crtc_state,
7623 					dm_new_crtc_state->crc_src);
7624 			}
7625 #endif
7626 		}
7627 	}
7628 
7629 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7630 		if (new_crtc_state->async_flip)
7631 			wait_for_vblank = false;
7632 
7633 	/* update planes when needed per crtc*/
7634 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7635 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7636 
7637 		if (dm_new_crtc_state->stream)
7638 			amdgpu_dm_commit_planes(state, dc_state, dev,
7639 						dm, crtc, wait_for_vblank);
7640 	}
7641 
7642 	/* Update audio instances for each connector. */
7643 	amdgpu_dm_commit_audio(dev, state);
7644 
7645 	/*
7646 	 * send vblank event on all events not handled in flip and
7647 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7648 	 */
7649 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7650 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7651 
7652 		if (new_crtc_state->event)
7653 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7654 
7655 		new_crtc_state->event = NULL;
7656 	}
7657 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7658 
7659 	/* Signal HW programming completion */
7660 	drm_atomic_helper_commit_hw_done(state);
7661 
7662 	if (wait_for_vblank)
7663 		drm_atomic_helper_wait_for_flip_done(dev, state);
7664 
7665 	drm_atomic_helper_cleanup_planes(dev, state);
7666 
7667 	/*
7668 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7669 	 * so we can put the GPU into runtime suspend if we're not driving any
7670 	 * displays anymore
7671 	 */
7672 	for (i = 0; i < crtc_disable_count; i++)
7673 		pm_runtime_put_autosuspend(dev->dev);
7674 	pm_runtime_mark_last_busy(dev->dev);
7675 
7676 	if (dc_state_temp)
7677 		dc_release_state(dc_state_temp);
7678 }
7679 
7680 
7681 static int dm_force_atomic_commit(struct drm_connector *connector)
7682 {
7683 	int ret = 0;
7684 	struct drm_device *ddev = connector->dev;
7685 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7686 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7687 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7688 	struct drm_connector_state *conn_state;
7689 	struct drm_crtc_state *crtc_state;
7690 	struct drm_plane_state *plane_state;
7691 
7692 	if (!state)
7693 		return -ENOMEM;
7694 
7695 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7696 
7697 	/* Construct an atomic state to restore previous display setting */
7698 
7699 	/*
7700 	 * Attach connectors to drm_atomic_state
7701 	 */
7702 	conn_state = drm_atomic_get_connector_state(state, connector);
7703 
7704 	ret = PTR_ERR_OR_ZERO(conn_state);
7705 	if (ret)
7706 		goto err;
7707 
7708 	/* Attach crtc to drm_atomic_state*/
7709 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7710 
7711 	ret = PTR_ERR_OR_ZERO(crtc_state);
7712 	if (ret)
7713 		goto err;
7714 
7715 	/* force a restore */
7716 	crtc_state->mode_changed = true;
7717 
7718 	/* Attach plane to drm_atomic_state */
7719 	plane_state = drm_atomic_get_plane_state(state, plane);
7720 
7721 	ret = PTR_ERR_OR_ZERO(plane_state);
7722 	if (ret)
7723 		goto err;
7724 
7725 
7726 	/* Call commit internally with the state we just constructed */
7727 	ret = drm_atomic_commit(state);
7728 	if (!ret)
7729 		return 0;
7730 
7731 err:
7732 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7733 	drm_atomic_state_put(state);
7734 
7735 	return ret;
7736 }
7737 
7738 /*
7739  * This function handles all cases when set mode does not come upon hotplug.
7740  * This includes when a display is unplugged then plugged back into the
7741  * same port and when running without usermode desktop manager supprot
7742  */
7743 void dm_restore_drm_connector_state(struct drm_device *dev,
7744 				    struct drm_connector *connector)
7745 {
7746 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7747 	struct amdgpu_crtc *disconnected_acrtc;
7748 	struct dm_crtc_state *acrtc_state;
7749 
7750 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7751 		return;
7752 
7753 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7754 	if (!disconnected_acrtc)
7755 		return;
7756 
7757 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7758 	if (!acrtc_state->stream)
7759 		return;
7760 
7761 	/*
7762 	 * If the previous sink is not released and different from the current,
7763 	 * we deduce we are in a state where we can not rely on usermode call
7764 	 * to turn on the display, so we do it here
7765 	 */
7766 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7767 		dm_force_atomic_commit(&aconnector->base);
7768 }
7769 
7770 /*
7771  * Grabs all modesetting locks to serialize against any blocking commits,
7772  * Waits for completion of all non blocking commits.
7773  */
7774 static int do_aquire_global_lock(struct drm_device *dev,
7775 				 struct drm_atomic_state *state)
7776 {
7777 	struct drm_crtc *crtc;
7778 	struct drm_crtc_commit *commit;
7779 	long ret;
7780 
7781 	/*
7782 	 * Adding all modeset locks to aquire_ctx will
7783 	 * ensure that when the framework release it the
7784 	 * extra locks we are locking here will get released to
7785 	 */
7786 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7787 	if (ret)
7788 		return ret;
7789 
7790 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7791 		spin_lock(&crtc->commit_lock);
7792 		commit = list_first_entry_or_null(&crtc->commit_list,
7793 				struct drm_crtc_commit, commit_entry);
7794 		if (commit)
7795 			drm_crtc_commit_get(commit);
7796 		spin_unlock(&crtc->commit_lock);
7797 
7798 		if (!commit)
7799 			continue;
7800 
7801 		/*
7802 		 * Make sure all pending HW programming completed and
7803 		 * page flips done
7804 		 */
7805 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7806 
7807 		if (ret > 0)
7808 			ret = wait_for_completion_interruptible_timeout(
7809 					&commit->flip_done, 10*HZ);
7810 
7811 		if (ret == 0)
7812 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7813 				  "timed out\n", crtc->base.id, crtc->name);
7814 
7815 		drm_crtc_commit_put(commit);
7816 	}
7817 
7818 	return ret < 0 ? ret : 0;
7819 }
7820 
7821 static void get_freesync_config_for_crtc(
7822 	struct dm_crtc_state *new_crtc_state,
7823 	struct dm_connector_state *new_con_state)
7824 {
7825 	struct mod_freesync_config config = {0};
7826 	struct amdgpu_dm_connector *aconnector =
7827 			to_amdgpu_dm_connector(new_con_state->base.connector);
7828 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7829 	int vrefresh = drm_mode_vrefresh(mode);
7830 
7831 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7832 					vrefresh >= aconnector->min_vfreq &&
7833 					vrefresh <= aconnector->max_vfreq;
7834 
7835 	if (new_crtc_state->vrr_supported) {
7836 		new_crtc_state->stream->ignore_msa_timing_param = true;
7837 		config.state = new_crtc_state->base.vrr_enabled ?
7838 				VRR_STATE_ACTIVE_VARIABLE :
7839 				VRR_STATE_INACTIVE;
7840 		config.min_refresh_in_uhz =
7841 				aconnector->min_vfreq * 1000000;
7842 		config.max_refresh_in_uhz =
7843 				aconnector->max_vfreq * 1000000;
7844 		config.vsif_supported = true;
7845 		config.btr = true;
7846 	}
7847 
7848 	new_crtc_state->freesync_config = config;
7849 }
7850 
7851 static void reset_freesync_config_for_crtc(
7852 	struct dm_crtc_state *new_crtc_state)
7853 {
7854 	new_crtc_state->vrr_supported = false;
7855 
7856 	memset(&new_crtc_state->vrr_params, 0,
7857 	       sizeof(new_crtc_state->vrr_params));
7858 	memset(&new_crtc_state->vrr_infopacket, 0,
7859 	       sizeof(new_crtc_state->vrr_infopacket));
7860 }
7861 
7862 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7863 				struct drm_atomic_state *state,
7864 				struct drm_crtc *crtc,
7865 				struct drm_crtc_state *old_crtc_state,
7866 				struct drm_crtc_state *new_crtc_state,
7867 				bool enable,
7868 				bool *lock_and_validation_needed)
7869 {
7870 	struct dm_atomic_state *dm_state = NULL;
7871 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7872 	struct dc_stream_state *new_stream;
7873 	int ret = 0;
7874 
7875 	/*
7876 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7877 	 * update changed items
7878 	 */
7879 	struct amdgpu_crtc *acrtc = NULL;
7880 	struct amdgpu_dm_connector *aconnector = NULL;
7881 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7882 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7883 
7884 	new_stream = NULL;
7885 
7886 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7887 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7888 	acrtc = to_amdgpu_crtc(crtc);
7889 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7890 
7891 	/* TODO This hack should go away */
7892 	if (aconnector && enable) {
7893 		/* Make sure fake sink is created in plug-in scenario */
7894 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7895 							    &aconnector->base);
7896 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7897 							    &aconnector->base);
7898 
7899 		if (IS_ERR(drm_new_conn_state)) {
7900 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7901 			goto fail;
7902 		}
7903 
7904 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7905 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7906 
7907 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7908 			goto skip_modeset;
7909 
7910 		new_stream = create_validate_stream_for_sink(aconnector,
7911 							     &new_crtc_state->mode,
7912 							     dm_new_conn_state,
7913 							     dm_old_crtc_state->stream);
7914 
7915 		/*
7916 		 * we can have no stream on ACTION_SET if a display
7917 		 * was disconnected during S3, in this case it is not an
7918 		 * error, the OS will be updated after detection, and
7919 		 * will do the right thing on next atomic commit
7920 		 */
7921 
7922 		if (!new_stream) {
7923 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7924 					__func__, acrtc->base.base.id);
7925 			ret = -ENOMEM;
7926 			goto fail;
7927 		}
7928 
7929 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7930 
7931 		ret = fill_hdr_info_packet(drm_new_conn_state,
7932 					   &new_stream->hdr_static_metadata);
7933 		if (ret)
7934 			goto fail;
7935 
7936 		/*
7937 		 * If we already removed the old stream from the context
7938 		 * (and set the new stream to NULL) then we can't reuse
7939 		 * the old stream even if the stream and scaling are unchanged.
7940 		 * We'll hit the BUG_ON and black screen.
7941 		 *
7942 		 * TODO: Refactor this function to allow this check to work
7943 		 * in all conditions.
7944 		 */
7945 		if (dm_new_crtc_state->stream &&
7946 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7947 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7948 			new_crtc_state->mode_changed = false;
7949 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7950 					 new_crtc_state->mode_changed);
7951 		}
7952 	}
7953 
7954 	/* mode_changed flag may get updated above, need to check again */
7955 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7956 		goto skip_modeset;
7957 
7958 	DRM_DEBUG_DRIVER(
7959 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7960 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7961 		"connectors_changed:%d\n",
7962 		acrtc->crtc_id,
7963 		new_crtc_state->enable,
7964 		new_crtc_state->active,
7965 		new_crtc_state->planes_changed,
7966 		new_crtc_state->mode_changed,
7967 		new_crtc_state->active_changed,
7968 		new_crtc_state->connectors_changed);
7969 
7970 	/* Remove stream for any changed/disabled CRTC */
7971 	if (!enable) {
7972 
7973 		if (!dm_old_crtc_state->stream)
7974 			goto skip_modeset;
7975 
7976 		ret = dm_atomic_get_state(state, &dm_state);
7977 		if (ret)
7978 			goto fail;
7979 
7980 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7981 				crtc->base.id);
7982 
7983 		/* i.e. reset mode */
7984 		if (dc_remove_stream_from_ctx(
7985 				dm->dc,
7986 				dm_state->context,
7987 				dm_old_crtc_state->stream) != DC_OK) {
7988 			ret = -EINVAL;
7989 			goto fail;
7990 		}
7991 
7992 		dc_stream_release(dm_old_crtc_state->stream);
7993 		dm_new_crtc_state->stream = NULL;
7994 
7995 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7996 
7997 		*lock_and_validation_needed = true;
7998 
7999 	} else {/* Add stream for any updated/enabled CRTC */
8000 		/*
8001 		 * Quick fix to prevent NULL pointer on new_stream when
8002 		 * added MST connectors not found in existing crtc_state in the chained mode
8003 		 * TODO: need to dig out the root cause of that
8004 		 */
8005 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8006 			goto skip_modeset;
8007 
8008 		if (modereset_required(new_crtc_state))
8009 			goto skip_modeset;
8010 
8011 		if (modeset_required(new_crtc_state, new_stream,
8012 				     dm_old_crtc_state->stream)) {
8013 
8014 			WARN_ON(dm_new_crtc_state->stream);
8015 
8016 			ret = dm_atomic_get_state(state, &dm_state);
8017 			if (ret)
8018 				goto fail;
8019 
8020 			dm_new_crtc_state->stream = new_stream;
8021 
8022 			dc_stream_retain(new_stream);
8023 
8024 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8025 						crtc->base.id);
8026 
8027 			if (dc_add_stream_to_ctx(
8028 					dm->dc,
8029 					dm_state->context,
8030 					dm_new_crtc_state->stream) != DC_OK) {
8031 				ret = -EINVAL;
8032 				goto fail;
8033 			}
8034 
8035 			*lock_and_validation_needed = true;
8036 		}
8037 	}
8038 
8039 skip_modeset:
8040 	/* Release extra reference */
8041 	if (new_stream)
8042 		 dc_stream_release(new_stream);
8043 
8044 	/*
8045 	 * We want to do dc stream updates that do not require a
8046 	 * full modeset below.
8047 	 */
8048 	if (!(enable && aconnector && new_crtc_state->enable &&
8049 	      new_crtc_state->active))
8050 		return 0;
8051 	/*
8052 	 * Given above conditions, the dc state cannot be NULL because:
8053 	 * 1. We're in the process of enabling CRTCs (just been added
8054 	 *    to the dc context, or already is on the context)
8055 	 * 2. Has a valid connector attached, and
8056 	 * 3. Is currently active and enabled.
8057 	 * => The dc stream state currently exists.
8058 	 */
8059 	BUG_ON(dm_new_crtc_state->stream == NULL);
8060 
8061 	/* Scaling or underscan settings */
8062 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8063 		update_stream_scaling_settings(
8064 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8065 
8066 	/* ABM settings */
8067 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8068 
8069 	/*
8070 	 * Color management settings. We also update color properties
8071 	 * when a modeset is needed, to ensure it gets reprogrammed.
8072 	 */
8073 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8074 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8075 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8076 		if (ret)
8077 			goto fail;
8078 	}
8079 
8080 	/* Update Freesync settings. */
8081 	get_freesync_config_for_crtc(dm_new_crtc_state,
8082 				     dm_new_conn_state);
8083 
8084 	return ret;
8085 
8086 fail:
8087 	if (new_stream)
8088 		dc_stream_release(new_stream);
8089 	return ret;
8090 }
8091 
8092 static bool should_reset_plane(struct drm_atomic_state *state,
8093 			       struct drm_plane *plane,
8094 			       struct drm_plane_state *old_plane_state,
8095 			       struct drm_plane_state *new_plane_state)
8096 {
8097 	struct drm_plane *other;
8098 	struct drm_plane_state *old_other_state, *new_other_state;
8099 	struct drm_crtc_state *new_crtc_state;
8100 	int i;
8101 
8102 	/*
8103 	 * TODO: Remove this hack once the checks below are sufficient
8104 	 * enough to determine when we need to reset all the planes on
8105 	 * the stream.
8106 	 */
8107 	if (state->allow_modeset)
8108 		return true;
8109 
8110 	/* Exit early if we know that we're adding or removing the plane. */
8111 	if (old_plane_state->crtc != new_plane_state->crtc)
8112 		return true;
8113 
8114 	/* old crtc == new_crtc == NULL, plane not in context. */
8115 	if (!new_plane_state->crtc)
8116 		return false;
8117 
8118 	new_crtc_state =
8119 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8120 
8121 	if (!new_crtc_state)
8122 		return true;
8123 
8124 	/* CRTC Degamma changes currently require us to recreate planes. */
8125 	if (new_crtc_state->color_mgmt_changed)
8126 		return true;
8127 
8128 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8129 		return true;
8130 
8131 	/*
8132 	 * If there are any new primary or overlay planes being added or
8133 	 * removed then the z-order can potentially change. To ensure
8134 	 * correct z-order and pipe acquisition the current DC architecture
8135 	 * requires us to remove and recreate all existing planes.
8136 	 *
8137 	 * TODO: Come up with a more elegant solution for this.
8138 	 */
8139 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8140 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8141 			continue;
8142 
8143 		if (old_other_state->crtc != new_plane_state->crtc &&
8144 		    new_other_state->crtc != new_plane_state->crtc)
8145 			continue;
8146 
8147 		if (old_other_state->crtc != new_other_state->crtc)
8148 			return true;
8149 
8150 		/* TODO: Remove this once we can handle fast format changes. */
8151 		if (old_other_state->fb && new_other_state->fb &&
8152 		    old_other_state->fb->format != new_other_state->fb->format)
8153 			return true;
8154 	}
8155 
8156 	return false;
8157 }
8158 
8159 static int dm_update_plane_state(struct dc *dc,
8160 				 struct drm_atomic_state *state,
8161 				 struct drm_plane *plane,
8162 				 struct drm_plane_state *old_plane_state,
8163 				 struct drm_plane_state *new_plane_state,
8164 				 bool enable,
8165 				 bool *lock_and_validation_needed)
8166 {
8167 
8168 	struct dm_atomic_state *dm_state = NULL;
8169 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8170 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8171 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8172 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8173 	struct amdgpu_crtc *new_acrtc;
8174 	bool needs_reset;
8175 	int ret = 0;
8176 
8177 
8178 	new_plane_crtc = new_plane_state->crtc;
8179 	old_plane_crtc = old_plane_state->crtc;
8180 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8181 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8182 
8183 	/*TODO Implement better atomic check for cursor plane */
8184 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8185 		if (!enable || !new_plane_crtc ||
8186 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8187 			return 0;
8188 
8189 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8190 
8191 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8192 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8193 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8194 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8195 			return -EINVAL;
8196 		}
8197 
8198 		return 0;
8199 	}
8200 
8201 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8202 					 new_plane_state);
8203 
8204 	/* Remove any changed/removed planes */
8205 	if (!enable) {
8206 		if (!needs_reset)
8207 			return 0;
8208 
8209 		if (!old_plane_crtc)
8210 			return 0;
8211 
8212 		old_crtc_state = drm_atomic_get_old_crtc_state(
8213 				state, old_plane_crtc);
8214 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8215 
8216 		if (!dm_old_crtc_state->stream)
8217 			return 0;
8218 
8219 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8220 				plane->base.id, old_plane_crtc->base.id);
8221 
8222 		ret = dm_atomic_get_state(state, &dm_state);
8223 		if (ret)
8224 			return ret;
8225 
8226 		if (!dc_remove_plane_from_context(
8227 				dc,
8228 				dm_old_crtc_state->stream,
8229 				dm_old_plane_state->dc_state,
8230 				dm_state->context)) {
8231 
8232 			ret = EINVAL;
8233 			return ret;
8234 		}
8235 
8236 
8237 		dc_plane_state_release(dm_old_plane_state->dc_state);
8238 		dm_new_plane_state->dc_state = NULL;
8239 
8240 		*lock_and_validation_needed = true;
8241 
8242 	} else { /* Add new planes */
8243 		struct dc_plane_state *dc_new_plane_state;
8244 
8245 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8246 			return 0;
8247 
8248 		if (!new_plane_crtc)
8249 			return 0;
8250 
8251 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8252 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8253 
8254 		if (!dm_new_crtc_state->stream)
8255 			return 0;
8256 
8257 		if (!needs_reset)
8258 			return 0;
8259 
8260 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8261 		if (ret)
8262 			return ret;
8263 
8264 		WARN_ON(dm_new_plane_state->dc_state);
8265 
8266 		dc_new_plane_state = dc_create_plane_state(dc);
8267 		if (!dc_new_plane_state)
8268 			return -ENOMEM;
8269 
8270 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8271 				plane->base.id, new_plane_crtc->base.id);
8272 
8273 		ret = fill_dc_plane_attributes(
8274 			new_plane_crtc->dev->dev_private,
8275 			dc_new_plane_state,
8276 			new_plane_state,
8277 			new_crtc_state);
8278 		if (ret) {
8279 			dc_plane_state_release(dc_new_plane_state);
8280 			return ret;
8281 		}
8282 
8283 		ret = dm_atomic_get_state(state, &dm_state);
8284 		if (ret) {
8285 			dc_plane_state_release(dc_new_plane_state);
8286 			return ret;
8287 		}
8288 
8289 		/*
8290 		 * Any atomic check errors that occur after this will
8291 		 * not need a release. The plane state will be attached
8292 		 * to the stream, and therefore part of the atomic
8293 		 * state. It'll be released when the atomic state is
8294 		 * cleaned.
8295 		 */
8296 		if (!dc_add_plane_to_context(
8297 				dc,
8298 				dm_new_crtc_state->stream,
8299 				dc_new_plane_state,
8300 				dm_state->context)) {
8301 
8302 			dc_plane_state_release(dc_new_plane_state);
8303 			return -EINVAL;
8304 		}
8305 
8306 		dm_new_plane_state->dc_state = dc_new_plane_state;
8307 
8308 		/* Tell DC to do a full surface update every time there
8309 		 * is a plane change. Inefficient, but works for now.
8310 		 */
8311 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8312 
8313 		*lock_and_validation_needed = true;
8314 	}
8315 
8316 
8317 	return ret;
8318 }
8319 
8320 static int
8321 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8322 				    struct drm_atomic_state *state,
8323 				    enum surface_update_type *out_type)
8324 {
8325 	struct dc *dc = dm->dc;
8326 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8327 	int i, j, num_plane, ret = 0;
8328 	struct drm_plane_state *old_plane_state, *new_plane_state;
8329 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8330 	struct drm_crtc *new_plane_crtc;
8331 	struct drm_plane *plane;
8332 
8333 	struct drm_crtc *crtc;
8334 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8335 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8336 	struct dc_stream_status *status = NULL;
8337 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8338 	struct surface_info_bundle {
8339 		struct dc_surface_update surface_updates[MAX_SURFACES];
8340 		struct dc_plane_info plane_infos[MAX_SURFACES];
8341 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8342 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8343 		struct dc_stream_update stream_update;
8344 	} *bundle;
8345 
8346 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8347 
8348 	if (!bundle) {
8349 		DRM_ERROR("Failed to allocate update bundle\n");
8350 		/* Set type to FULL to avoid crashing in DC*/
8351 		update_type = UPDATE_TYPE_FULL;
8352 		goto cleanup;
8353 	}
8354 
8355 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8356 
8357 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8358 
8359 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8360 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8361 		num_plane = 0;
8362 
8363 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8364 			update_type = UPDATE_TYPE_FULL;
8365 			goto cleanup;
8366 		}
8367 
8368 		if (!new_dm_crtc_state->stream)
8369 			continue;
8370 
8371 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8372 			const struct amdgpu_framebuffer *amdgpu_fb =
8373 				to_amdgpu_framebuffer(new_plane_state->fb);
8374 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8375 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8376 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8377 			uint64_t tiling_flags;
8378 			bool tmz_surface = false;
8379 
8380 			new_plane_crtc = new_plane_state->crtc;
8381 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8382 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8383 
8384 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8385 				continue;
8386 
8387 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8388 				update_type = UPDATE_TYPE_FULL;
8389 				goto cleanup;
8390 			}
8391 
8392 			if (crtc != new_plane_crtc)
8393 				continue;
8394 
8395 			bundle->surface_updates[num_plane].surface =
8396 					new_dm_plane_state->dc_state;
8397 
8398 			if (new_crtc_state->mode_changed) {
8399 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8400 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8401 			}
8402 
8403 			if (new_crtc_state->color_mgmt_changed) {
8404 				bundle->surface_updates[num_plane].gamma =
8405 						new_dm_plane_state->dc_state->gamma_correction;
8406 				bundle->surface_updates[num_plane].in_transfer_func =
8407 						new_dm_plane_state->dc_state->in_transfer_func;
8408 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8409 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8410 				bundle->stream_update.gamut_remap =
8411 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8412 				bundle->stream_update.output_csc_transform =
8413 						&new_dm_crtc_state->stream->csc_color_matrix;
8414 				bundle->stream_update.out_transfer_func =
8415 						new_dm_crtc_state->stream->out_transfer_func;
8416 			}
8417 
8418 			ret = fill_dc_scaling_info(new_plane_state,
8419 						   scaling_info);
8420 			if (ret)
8421 				goto cleanup;
8422 
8423 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8424 
8425 			if (amdgpu_fb) {
8426 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8427 				if (ret)
8428 					goto cleanup;
8429 
8430 				ret = fill_dc_plane_info_and_addr(
8431 					dm->adev, new_plane_state, tiling_flags,
8432 					plane_info,
8433 					&flip_addr->address, tmz_surface,
8434 					false);
8435 				if (ret)
8436 					goto cleanup;
8437 
8438 				bundle->surface_updates[num_plane].plane_info = plane_info;
8439 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8440 			}
8441 
8442 			num_plane++;
8443 		}
8444 
8445 		if (num_plane == 0)
8446 			continue;
8447 
8448 		ret = dm_atomic_get_state(state, &dm_state);
8449 		if (ret)
8450 			goto cleanup;
8451 
8452 		old_dm_state = dm_atomic_get_old_state(state);
8453 		if (!old_dm_state) {
8454 			ret = -EINVAL;
8455 			goto cleanup;
8456 		}
8457 
8458 		status = dc_stream_get_status_from_state(old_dm_state->context,
8459 							 new_dm_crtc_state->stream);
8460 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8461 		/*
8462 		 * TODO: DC modifies the surface during this call so we need
8463 		 * to lock here - find a way to do this without locking.
8464 		 */
8465 		mutex_lock(&dm->dc_lock);
8466 		update_type = dc_check_update_surfaces_for_stream(
8467 				dc,	bundle->surface_updates, num_plane,
8468 				&bundle->stream_update, status);
8469 		mutex_unlock(&dm->dc_lock);
8470 
8471 		if (update_type > UPDATE_TYPE_MED) {
8472 			update_type = UPDATE_TYPE_FULL;
8473 			goto cleanup;
8474 		}
8475 	}
8476 
8477 cleanup:
8478 	kfree(bundle);
8479 
8480 	*out_type = update_type;
8481 	return ret;
8482 }
8483 #if defined(CONFIG_DRM_AMD_DC_DCN)
8484 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8485 {
8486 	struct drm_connector *connector;
8487 	struct drm_connector_state *conn_state;
8488 	struct amdgpu_dm_connector *aconnector = NULL;
8489 	int i;
8490 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8491 		if (conn_state->crtc != crtc)
8492 			continue;
8493 
8494 		aconnector = to_amdgpu_dm_connector(connector);
8495 		if (!aconnector->port || !aconnector->mst_port)
8496 			aconnector = NULL;
8497 		else
8498 			break;
8499 	}
8500 
8501 	if (!aconnector)
8502 		return 0;
8503 
8504 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8505 }
8506 #endif
8507 
8508 /**
8509  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8510  * @dev: The DRM device
8511  * @state: The atomic state to commit
8512  *
8513  * Validate that the given atomic state is programmable by DC into hardware.
8514  * This involves constructing a &struct dc_state reflecting the new hardware
8515  * state we wish to commit, then querying DC to see if it is programmable. It's
8516  * important not to modify the existing DC state. Otherwise, atomic_check
8517  * may unexpectedly commit hardware changes.
8518  *
8519  * When validating the DC state, it's important that the right locks are
8520  * acquired. For full updates case which removes/adds/updates streams on one
8521  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8522  * that any such full update commit will wait for completion of any outstanding
8523  * flip using DRMs synchronization events. See
8524  * dm_determine_update_type_for_commit()
8525  *
8526  * Note that DM adds the affected connectors for all CRTCs in state, when that
8527  * might not seem necessary. This is because DC stream creation requires the
8528  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8529  * be possible but non-trivial - a possible TODO item.
8530  *
8531  * Return: -Error code if validation failed.
8532  */
8533 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8534 				  struct drm_atomic_state *state)
8535 {
8536 	struct amdgpu_device *adev = dev->dev_private;
8537 	struct dm_atomic_state *dm_state = NULL;
8538 	struct dc *dc = adev->dm.dc;
8539 	struct drm_connector *connector;
8540 	struct drm_connector_state *old_con_state, *new_con_state;
8541 	struct drm_crtc *crtc;
8542 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8543 	struct drm_plane *plane;
8544 	struct drm_plane_state *old_plane_state, *new_plane_state;
8545 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8546 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8547 	enum dc_status status;
8548 	int ret, i;
8549 
8550 	/*
8551 	 * This bool will be set for true for any modeset/reset
8552 	 * or plane update which implies non fast surface update.
8553 	 */
8554 	bool lock_and_validation_needed = false;
8555 
8556 	ret = drm_atomic_helper_check_modeset(dev, state);
8557 	if (ret)
8558 		goto fail;
8559 
8560 	/* Check connector changes */
8561 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8562 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8563 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8564 
8565 		/* Skip connectors that are disabled or part of modeset already. */
8566 		if (!old_con_state->crtc && !new_con_state->crtc)
8567 			continue;
8568 
8569 		if (!new_con_state->crtc)
8570 			continue;
8571 
8572 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8573 		if (IS_ERR(new_crtc_state)) {
8574 			ret = PTR_ERR(new_crtc_state);
8575 			goto fail;
8576 		}
8577 
8578 		if (dm_old_con_state->abm_level !=
8579 		    dm_new_con_state->abm_level)
8580 			new_crtc_state->connectors_changed = true;
8581 	}
8582 
8583 #if defined(CONFIG_DRM_AMD_DC_DCN)
8584 	if (adev->asic_type >= CHIP_NAVI10) {
8585 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8586 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8587 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8588 				if (ret)
8589 					goto fail;
8590 			}
8591 		}
8592 	}
8593 #endif
8594 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8595 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8596 		    !new_crtc_state->color_mgmt_changed &&
8597 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8598 			continue;
8599 
8600 		if (!new_crtc_state->enable)
8601 			continue;
8602 
8603 		ret = drm_atomic_add_affected_connectors(state, crtc);
8604 		if (ret)
8605 			return ret;
8606 
8607 		ret = drm_atomic_add_affected_planes(state, crtc);
8608 		if (ret)
8609 			goto fail;
8610 	}
8611 
8612 	/*
8613 	 * Add all primary and overlay planes on the CRTC to the state
8614 	 * whenever a plane is enabled to maintain correct z-ordering
8615 	 * and to enable fast surface updates.
8616 	 */
8617 	drm_for_each_crtc(crtc, dev) {
8618 		bool modified = false;
8619 
8620 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8621 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8622 				continue;
8623 
8624 			if (new_plane_state->crtc == crtc ||
8625 			    old_plane_state->crtc == crtc) {
8626 				modified = true;
8627 				break;
8628 			}
8629 		}
8630 
8631 		if (!modified)
8632 			continue;
8633 
8634 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8635 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8636 				continue;
8637 
8638 			new_plane_state =
8639 				drm_atomic_get_plane_state(state, plane);
8640 
8641 			if (IS_ERR(new_plane_state)) {
8642 				ret = PTR_ERR(new_plane_state);
8643 				goto fail;
8644 			}
8645 		}
8646 	}
8647 
8648 	/* Remove exiting planes if they are modified */
8649 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8650 		ret = dm_update_plane_state(dc, state, plane,
8651 					    old_plane_state,
8652 					    new_plane_state,
8653 					    false,
8654 					    &lock_and_validation_needed);
8655 		if (ret)
8656 			goto fail;
8657 	}
8658 
8659 	/* Disable all crtcs which require disable */
8660 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8661 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8662 					   old_crtc_state,
8663 					   new_crtc_state,
8664 					   false,
8665 					   &lock_and_validation_needed);
8666 		if (ret)
8667 			goto fail;
8668 	}
8669 
8670 	/* Enable all crtcs which require enable */
8671 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8672 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8673 					   old_crtc_state,
8674 					   new_crtc_state,
8675 					   true,
8676 					   &lock_and_validation_needed);
8677 		if (ret)
8678 			goto fail;
8679 	}
8680 
8681 	/* Add new/modified planes */
8682 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8683 		ret = dm_update_plane_state(dc, state, plane,
8684 					    old_plane_state,
8685 					    new_plane_state,
8686 					    true,
8687 					    &lock_and_validation_needed);
8688 		if (ret)
8689 			goto fail;
8690 	}
8691 
8692 	/* Run this here since we want to validate the streams we created */
8693 	ret = drm_atomic_helper_check_planes(dev, state);
8694 	if (ret)
8695 		goto fail;
8696 
8697 	if (state->legacy_cursor_update) {
8698 		/*
8699 		 * This is a fast cursor update coming from the plane update
8700 		 * helper, check if it can be done asynchronously for better
8701 		 * performance.
8702 		 */
8703 		state->async_update =
8704 			!drm_atomic_helper_async_check(dev, state);
8705 
8706 		/*
8707 		 * Skip the remaining global validation if this is an async
8708 		 * update. Cursor updates can be done without affecting
8709 		 * state or bandwidth calcs and this avoids the performance
8710 		 * penalty of locking the private state object and
8711 		 * allocating a new dc_state.
8712 		 */
8713 		if (state->async_update)
8714 			return 0;
8715 	}
8716 
8717 	/* Check scaling and underscan changes*/
8718 	/* TODO Removed scaling changes validation due to inability to commit
8719 	 * new stream into context w\o causing full reset. Need to
8720 	 * decide how to handle.
8721 	 */
8722 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8723 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8724 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8725 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8726 
8727 		/* Skip any modesets/resets */
8728 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8729 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8730 			continue;
8731 
8732 		/* Skip any thing not scale or underscan changes */
8733 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8734 			continue;
8735 
8736 		overall_update_type = UPDATE_TYPE_FULL;
8737 		lock_and_validation_needed = true;
8738 	}
8739 
8740 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8741 	if (ret)
8742 		goto fail;
8743 
8744 	if (overall_update_type < update_type)
8745 		overall_update_type = update_type;
8746 
8747 	/*
8748 	 * lock_and_validation_needed was an old way to determine if we need to set
8749 	 * the global lock. Leaving it in to check if we broke any corner cases
8750 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8751 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8752 	 */
8753 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8754 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8755 
8756 	if (overall_update_type > UPDATE_TYPE_FAST) {
8757 		ret = dm_atomic_get_state(state, &dm_state);
8758 		if (ret)
8759 			goto fail;
8760 
8761 		ret = do_aquire_global_lock(dev, state);
8762 		if (ret)
8763 			goto fail;
8764 
8765 #if defined(CONFIG_DRM_AMD_DC_DCN)
8766 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8767 			goto fail;
8768 
8769 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8770 		if (ret)
8771 			goto fail;
8772 #endif
8773 
8774 		/*
8775 		 * Perform validation of MST topology in the state:
8776 		 * We need to perform MST atomic check before calling
8777 		 * dc_validate_global_state(), or there is a chance
8778 		 * to get stuck in an infinite loop and hang eventually.
8779 		 */
8780 		ret = drm_dp_mst_atomic_check(state);
8781 		if (ret)
8782 			goto fail;
8783 		status = dc_validate_global_state(dc, dm_state->context, false);
8784 		if (status != DC_OK) {
8785 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8786 				       dc_status_to_str(status), status);
8787 			ret = -EINVAL;
8788 			goto fail;
8789 		}
8790 	} else {
8791 		/*
8792 		 * The commit is a fast update. Fast updates shouldn't change
8793 		 * the DC context, affect global validation, and can have their
8794 		 * commit work done in parallel with other commits not touching
8795 		 * the same resource. If we have a new DC context as part of
8796 		 * the DM atomic state from validation we need to free it and
8797 		 * retain the existing one instead.
8798 		 *
8799 		 * Furthermore, since the DM atomic state only contains the DC
8800 		 * context and can safely be annulled, we can free the state
8801 		 * and clear the associated private object now to free
8802 		 * some memory and avoid a possible use-after-free later.
8803 		 */
8804 
8805 		for (i = 0; i < state->num_private_objs; i++) {
8806 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8807 
8808 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8809 				int j = state->num_private_objs-1;
8810 
8811 				dm_atomic_destroy_state(obj,
8812 						state->private_objs[i].state);
8813 
8814 				/* If i is not at the end of the array then the
8815 				 * last element needs to be moved to where i was
8816 				 * before the array can safely be truncated.
8817 				 */
8818 				if (i != j)
8819 					state->private_objs[i] =
8820 						state->private_objs[j];
8821 
8822 				state->private_objs[j].ptr = NULL;
8823 				state->private_objs[j].state = NULL;
8824 				state->private_objs[j].old_state = NULL;
8825 				state->private_objs[j].new_state = NULL;
8826 
8827 				state->num_private_objs = j;
8828 				break;
8829 			}
8830 		}
8831 	}
8832 
8833 	/* Store the overall update type for use later in atomic check. */
8834 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8835 		struct dm_crtc_state *dm_new_crtc_state =
8836 			to_dm_crtc_state(new_crtc_state);
8837 
8838 		dm_new_crtc_state->update_type = (int)overall_update_type;
8839 	}
8840 
8841 	/* Must be success */
8842 	WARN_ON(ret);
8843 	return ret;
8844 
8845 fail:
8846 	if (ret == -EDEADLK)
8847 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8848 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8849 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8850 	else
8851 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8852 
8853 	return ret;
8854 }
8855 
8856 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8857 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8858 {
8859 	uint8_t dpcd_data;
8860 	bool capable = false;
8861 
8862 	if (amdgpu_dm_connector->dc_link &&
8863 		dm_helpers_dp_read_dpcd(
8864 				NULL,
8865 				amdgpu_dm_connector->dc_link,
8866 				DP_DOWN_STREAM_PORT_COUNT,
8867 				&dpcd_data,
8868 				sizeof(dpcd_data))) {
8869 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8870 	}
8871 
8872 	return capable;
8873 }
8874 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8875 					struct edid *edid)
8876 {
8877 	int i;
8878 	bool edid_check_required;
8879 	struct detailed_timing *timing;
8880 	struct detailed_non_pixel *data;
8881 	struct detailed_data_monitor_range *range;
8882 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8883 			to_amdgpu_dm_connector(connector);
8884 	struct dm_connector_state *dm_con_state = NULL;
8885 
8886 	struct drm_device *dev = connector->dev;
8887 	struct amdgpu_device *adev = dev->dev_private;
8888 	bool freesync_capable = false;
8889 
8890 	if (!connector->state) {
8891 		DRM_ERROR("%s - Connector has no state", __func__);
8892 		goto update;
8893 	}
8894 
8895 	if (!edid) {
8896 		dm_con_state = to_dm_connector_state(connector->state);
8897 
8898 		amdgpu_dm_connector->min_vfreq = 0;
8899 		amdgpu_dm_connector->max_vfreq = 0;
8900 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8901 
8902 		goto update;
8903 	}
8904 
8905 	dm_con_state = to_dm_connector_state(connector->state);
8906 
8907 	edid_check_required = false;
8908 	if (!amdgpu_dm_connector->dc_sink) {
8909 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8910 		goto update;
8911 	}
8912 	if (!adev->dm.freesync_module)
8913 		goto update;
8914 	/*
8915 	 * if edid non zero restrict freesync only for dp and edp
8916 	 */
8917 	if (edid) {
8918 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8919 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8920 			edid_check_required = is_dp_capable_without_timing_msa(
8921 						adev->dm.dc,
8922 						amdgpu_dm_connector);
8923 		}
8924 	}
8925 	if (edid_check_required == true && (edid->version > 1 ||
8926 	   (edid->version == 1 && edid->revision > 1))) {
8927 		for (i = 0; i < 4; i++) {
8928 
8929 			timing	= &edid->detailed_timings[i];
8930 			data	= &timing->data.other_data;
8931 			range	= &data->data.range;
8932 			/*
8933 			 * Check if monitor has continuous frequency mode
8934 			 */
8935 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8936 				continue;
8937 			/*
8938 			 * Check for flag range limits only. If flag == 1 then
8939 			 * no additional timing information provided.
8940 			 * Default GTF, GTF Secondary curve and CVT are not
8941 			 * supported
8942 			 */
8943 			if (range->flags != 1)
8944 				continue;
8945 
8946 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8947 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8948 			amdgpu_dm_connector->pixel_clock_mhz =
8949 				range->pixel_clock_mhz * 10;
8950 			break;
8951 		}
8952 
8953 		if (amdgpu_dm_connector->max_vfreq -
8954 		    amdgpu_dm_connector->min_vfreq > 10) {
8955 
8956 			freesync_capable = true;
8957 		}
8958 	}
8959 
8960 update:
8961 	if (dm_con_state)
8962 		dm_con_state->freesync_capable = freesync_capable;
8963 
8964 	if (connector->vrr_capable_property)
8965 		drm_connector_set_vrr_capable_property(connector,
8966 						       freesync_capable);
8967 }
8968 
8969 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8970 {
8971 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8972 
8973 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8974 		return;
8975 	if (link->type == dc_connection_none)
8976 		return;
8977 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8978 					dpcd_data, sizeof(dpcd_data))) {
8979 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8980 
8981 		if (dpcd_data[0] == 0) {
8982 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8983 			link->psr_settings.psr_feature_enabled = false;
8984 		} else {
8985 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8986 			link->psr_settings.psr_feature_enabled = true;
8987 		}
8988 
8989 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8990 	}
8991 }
8992 
8993 /*
8994  * amdgpu_dm_link_setup_psr() - configure psr link
8995  * @stream: stream state
8996  *
8997  * Return: true if success
8998  */
8999 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9000 {
9001 	struct dc_link *link = NULL;
9002 	struct psr_config psr_config = {0};
9003 	struct psr_context psr_context = {0};
9004 	bool ret = false;
9005 
9006 	if (stream == NULL)
9007 		return false;
9008 
9009 	link = stream->link;
9010 
9011 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9012 
9013 	if (psr_config.psr_version > 0) {
9014 		psr_config.psr_exit_link_training_required = 0x1;
9015 		psr_config.psr_frame_capture_indication_req = 0;
9016 		psr_config.psr_rfb_setup_time = 0x37;
9017 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9018 		psr_config.allow_smu_optimizations = 0x0;
9019 
9020 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9021 
9022 	}
9023 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9024 
9025 	return ret;
9026 }
9027 
9028 /*
9029  * amdgpu_dm_psr_enable() - enable psr f/w
9030  * @stream: stream state
9031  *
9032  * Return: true if success
9033  */
9034 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9035 {
9036 	struct dc_link *link = stream->link;
9037 	unsigned int vsync_rate_hz = 0;
9038 	struct dc_static_screen_params params = {0};
9039 	/* Calculate number of static frames before generating interrupt to
9040 	 * enter PSR.
9041 	 */
9042 	// Init fail safe of 2 frames static
9043 	unsigned int num_frames_static = 2;
9044 
9045 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9046 
9047 	vsync_rate_hz = div64_u64(div64_u64((
9048 			stream->timing.pix_clk_100hz * 100),
9049 			stream->timing.v_total),
9050 			stream->timing.h_total);
9051 
9052 	/* Round up
9053 	 * Calculate number of frames such that at least 30 ms of time has
9054 	 * passed.
9055 	 */
9056 	if (vsync_rate_hz != 0) {
9057 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9058 		num_frames_static = (30000 / frame_time_microsec) + 1;
9059 	}
9060 
9061 	params.triggers.cursor_update = true;
9062 	params.triggers.overlay_update = true;
9063 	params.triggers.surface_update = true;
9064 	params.num_frames = num_frames_static;
9065 
9066 	dc_stream_set_static_screen_params(link->ctx->dc,
9067 					   &stream, 1,
9068 					   &params);
9069 
9070 	return dc_link_set_psr_allow_active(link, true, false);
9071 }
9072 
9073 /*
9074  * amdgpu_dm_psr_disable() - disable psr f/w
9075  * @stream:  stream state
9076  *
9077  * Return: true if success
9078  */
9079 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9080 {
9081 
9082 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9083 
9084 	return dc_link_set_psr_allow_active(stream->link, false, true);
9085 }
9086